From 320715f3e77cc165535aef4abd865452608cfcb4 Mon Sep 17 00:00:00 2001 From: DevMiner Date: Sun, 11 Aug 2024 03:51:22 +0200 Subject: [PATCH] chore: init --- .env | 14 + .gitignore | 4 + LICENSE | 662 ++ README.md | 63 + compose.yml | 16 + config/config.go | 55 + ent/attachment.go | 310 + ent/attachment/attachment.go | 190 + ent/attachment/where.go | 745 ++ ent/attachment_create.go | 1362 ++++ ent/attachment_delete.go | 88 + ent/attachment_query.go | 614 ++ ent/attachment_update.go | 843 +++ ent/client.go | 1247 ++++ ent/ent.go | 618 ++ ent/enttest/enttest.go | 84 + ent/follow.go | 237 + ent/follow/follow.go | 187 + ent/follow/where.go | 313 + ent/follow_create.go | 854 +++ ent/follow_delete.go | 88 + ent/follow_query.go | 688 ++ ent/follow_update.go | 567 ++ ent/generate.go | 3 + ent/hook/hook.go | 259 + ent/image.go | 114 + ent/image/image.go | 55 + ent/image/where.go | 208 + ent/image_create.go | 507 ++ ent/image_delete.go | 88 + ent/image_query.go | 526 ++ ent/image_update.go | 243 + ent/internal/schema.go | 9 + ent/migrate/migrate.go | 64 + ent/migrate/schema.go | 265 + ent/mutation.go | 6055 +++++++++++++++++ ent/note.go | 277 + ent/note/note.go | 257 + ent/note/where.go | 501 ++ ent/note_create.go | 1087 +++ ent/note_delete.go | 88 + ent/note_query.go | 794 +++ ent/note_update.go | 924 +++ ent/predicate/predicate.go | 25 + ent/runtime.go | 231 + ent/runtime/runtime.go | 10 + ent/schema/attachment.go | 34 + ent/schema/follow.go | 33 + ent/schema/image.go | 19 + ent/schema/lysand_entity.go | 40 + ent/schema/note.go | 32 + ent/schema/server_metadata.go | 43 + ent/schema/user.go | 65 + ent/servermetadata.go | 275 + ent/servermetadata/servermetadata.go | 185 + ent/servermetadata/where.go | 513 ++ ent/servermetadata_create.go | 1035 +++ ent/servermetadata_delete.go | 88 + ent/servermetadata_query.go | 688 ++ ent/servermetadata_update.go | 704 ++ ent/tx.go | 225 + ent/user.go | 423 ++ ent/user/user.go | 354 + ent/user/where.go | 1140 ++++ ent/user_create.go | 1752 
+++++ ent/user_delete.go | 88 + ent/user_query.go | 868 +++ ent/user_update.go | 1456 ++++ entsqlite.go | 29 + fiber_error_handler.go | 32 + go.mod | 91 + go.sum | 271 + internal/api_schema/api.go | 86 + internal/api_schema/errors.go | 15 + internal/api_schema/notes.go | 18 + internal/api_schema/users.go | 17 + internal/database/transaction.go | 85 + internal/entity/follow.go | 76 + internal/entity/note.go | 70 + internal/entity/user.go | 139 + internal/handlers/follow_handler/handler.go | 33 + .../follow_handler/lysand_follow_get.go | 28 + internal/handlers/meta_handler/handler.go | 28 + .../lysand_server_metadata_get.go | 28 + .../meta_handler/wellknown_host_meta.go | 23 + .../handlers/note_handler/app_note_create.go | 32 + .../handlers/note_handler/app_note_get.go | 28 + internal/handlers/note_handler/handler.go | 35 + .../handlers/user_handler/app_user_create.go | 35 + .../handlers/user_handler/app_user_get.go | 31 + internal/handlers/user_handler/handler.go | 46 + .../handlers/user_handler/lysand_inbox.go | 57 + .../handlers/user_handler/lysand_user_get.go | 31 + internal/handlers/user_handler/robots_txt.go | 9 + .../user_handler/wellknown_webfinger.go | 32 + internal/helpers/crypto.go | 22 + internal/helpers/ptr.go | 5 + .../repo_impls/follow_repository_impl.go | 171 + internal/repository/repo_impls/manager.go | 90 + .../repo_impls/note_repository_impl.go | 117 + .../repo_impls/user_repository_impl.go | 325 + internal/repository/repository.go | 49 + internal/service/service.go | 49 + .../service/svc_impls/Inbox_service_impl.go | 147 + .../svc_impls/federation_service_impl.go | 56 + .../service/svc_impls/follow_service_impl.go | 102 + .../service/svc_impls/note_service_impl.go | 98 + .../service/svc_impls/task_service_impl.go | 49 + .../service/svc_impls/user_service_impl.go | 122 + internal/tasks/federate_follow.go | 11 + internal/tasks/federate_note.go | 48 + internal/tasks/handler.go | 53 + internal/utils/mapper.go | 52 + internal/utils/urls.go | 75 + 
.../val_impls/body_validator_impl.go | 90 + .../val_impls/request_validator_impl.go | 109 + internal/validators/validator.go | 16 + main.go | 324 + pkg/lysand/action_follow.go | 51 + pkg/lysand/action_undo.go | 19 + pkg/lysand/actor_user.go | 150 + pkg/lysand/attachment.go | 26 + pkg/lysand/content_types.go | 85 + pkg/lysand/crypto.go | 92 + pkg/lysand/crypto_test.go | 100 + pkg/lysand/entity.go | 43 + pkg/lysand/federation_client.go | 123 + pkg/lysand/inbox.go | 72 + pkg/lysand/public_key.go | 66 + pkg/lysand/public_key_test.go | 33 + pkg/lysand/publication.go | 118 + pkg/lysand/publication_note.go | 12 + pkg/lysand/publication_patch.go | 29 + pkg/lysand/server_metadata.go | 64 + pkg/lysand/signature.go | 70 + pkg/lysand/signature_header.go | 66 + pkg/lysand/signature_header_test.go | 41 + pkg/lysand/time.go | 57 + pkg/lysand/url.go | 54 + pkg/taskqueue/client.go | 288 + pkg/webfinger/host_meta.go | 51 + pkg/webfinger/webfinger.go | 72 + scripts/ssl-tunnel.sh | 3 + web/.env | 5 + web/.eslintrc.cjs | 18 + web/.gitignore | 24 + web/index.html | 13 + web/package.json | 46 + web/pnpm-lock.yaml | 3519 ++++++++++ web/postcss.config.js | 6 + web/public/vite.svg | 1 + web/src/api/auth.ts | 51 + web/src/api/index.ts | 28 + web/src/api/notes.ts | 40 + web/src/api/response.ts | 42 + web/src/assets/react.svg | 1 + web/src/env.ts | 9 + web/src/hooks/useCreateNote.ts | 33 + web/src/hooks/useCreateUser.ts | 27 + web/src/index.css | 3 + web/src/instrument.ts | 28 + web/src/main.tsx | 23 + web/src/routeTree.gen.ts | 100 + web/src/routes/__root.tsx | 31 + web/src/routes/index.lazy.tsx | 13 + web/src/routes/post.lazy.tsx | 42 + web/src/routes/register.lazy.tsx | 22 + web/src/routing.ts | 10 + web/src/vite-env.d.ts | 1 + web/tailwind.config.js | 8 + web/tsconfig.app.json | 31 + web/tsconfig.json | 11 + web/tsconfig.node.json | 15 + web/vite.config.ts | 13 + 174 files changed, 42083 insertions(+) create mode 100644 .env create mode 100644 .gitignore create mode 100644 LICENSE create 
mode 100644 README.md create mode 100644 compose.yml create mode 100644 config/config.go create mode 100644 ent/attachment.go create mode 100644 ent/attachment/attachment.go create mode 100644 ent/attachment/where.go create mode 100644 ent/attachment_create.go create mode 100644 ent/attachment_delete.go create mode 100644 ent/attachment_query.go create mode 100644 ent/attachment_update.go create mode 100644 ent/client.go create mode 100644 ent/ent.go create mode 100644 ent/enttest/enttest.go create mode 100644 ent/follow.go create mode 100644 ent/follow/follow.go create mode 100644 ent/follow/where.go create mode 100644 ent/follow_create.go create mode 100644 ent/follow_delete.go create mode 100644 ent/follow_query.go create mode 100644 ent/follow_update.go create mode 100644 ent/generate.go create mode 100644 ent/hook/hook.go create mode 100644 ent/image.go create mode 100644 ent/image/image.go create mode 100644 ent/image/where.go create mode 100644 ent/image_create.go create mode 100644 ent/image_delete.go create mode 100644 ent/image_query.go create mode 100644 ent/image_update.go create mode 100644 ent/internal/schema.go create mode 100644 ent/migrate/migrate.go create mode 100644 ent/migrate/schema.go create mode 100644 ent/mutation.go create mode 100644 ent/note.go create mode 100644 ent/note/note.go create mode 100644 ent/note/where.go create mode 100644 ent/note_create.go create mode 100644 ent/note_delete.go create mode 100644 ent/note_query.go create mode 100644 ent/note_update.go create mode 100644 ent/predicate/predicate.go create mode 100644 ent/runtime.go create mode 100644 ent/runtime/runtime.go create mode 100644 ent/schema/attachment.go create mode 100644 ent/schema/follow.go create mode 100644 ent/schema/image.go create mode 100644 ent/schema/lysand_entity.go create mode 100644 ent/schema/note.go create mode 100644 ent/schema/server_metadata.go create mode 100644 ent/schema/user.go create mode 100644 ent/servermetadata.go create mode 100644 
ent/servermetadata/servermetadata.go create mode 100644 ent/servermetadata/where.go create mode 100644 ent/servermetadata_create.go create mode 100644 ent/servermetadata_delete.go create mode 100644 ent/servermetadata_query.go create mode 100644 ent/servermetadata_update.go create mode 100644 ent/tx.go create mode 100644 ent/user.go create mode 100644 ent/user/user.go create mode 100644 ent/user/where.go create mode 100644 ent/user_create.go create mode 100644 ent/user_delete.go create mode 100644 ent/user_query.go create mode 100644 ent/user_update.go create mode 100644 entsqlite.go create mode 100644 fiber_error_handler.go create mode 100644 go.mod create mode 100644 go.sum create mode 100644 internal/api_schema/api.go create mode 100644 internal/api_schema/errors.go create mode 100644 internal/api_schema/notes.go create mode 100644 internal/api_schema/users.go create mode 100644 internal/database/transaction.go create mode 100644 internal/entity/follow.go create mode 100644 internal/entity/note.go create mode 100644 internal/entity/user.go create mode 100644 internal/handlers/follow_handler/handler.go create mode 100644 internal/handlers/follow_handler/lysand_follow_get.go create mode 100644 internal/handlers/meta_handler/handler.go create mode 100644 internal/handlers/meta_handler/lysand_server_metadata_get.go create mode 100644 internal/handlers/meta_handler/wellknown_host_meta.go create mode 100644 internal/handlers/note_handler/app_note_create.go create mode 100644 internal/handlers/note_handler/app_note_get.go create mode 100644 internal/handlers/note_handler/handler.go create mode 100644 internal/handlers/user_handler/app_user_create.go create mode 100644 internal/handlers/user_handler/app_user_get.go create mode 100644 internal/handlers/user_handler/handler.go create mode 100644 internal/handlers/user_handler/lysand_inbox.go create mode 100644 internal/handlers/user_handler/lysand_user_get.go create mode 100644 internal/handlers/user_handler/robots_txt.go 
create mode 100644 internal/handlers/user_handler/wellknown_webfinger.go create mode 100644 internal/helpers/crypto.go create mode 100644 internal/helpers/ptr.go create mode 100644 internal/repository/repo_impls/follow_repository_impl.go create mode 100644 internal/repository/repo_impls/manager.go create mode 100644 internal/repository/repo_impls/note_repository_impl.go create mode 100644 internal/repository/repo_impls/user_repository_impl.go create mode 100644 internal/repository/repository.go create mode 100644 internal/service/service.go create mode 100644 internal/service/svc_impls/Inbox_service_impl.go create mode 100644 internal/service/svc_impls/federation_service_impl.go create mode 100644 internal/service/svc_impls/follow_service_impl.go create mode 100644 internal/service/svc_impls/note_service_impl.go create mode 100644 internal/service/svc_impls/task_service_impl.go create mode 100644 internal/service/svc_impls/user_service_impl.go create mode 100644 internal/tasks/federate_follow.go create mode 100644 internal/tasks/federate_note.go create mode 100644 internal/tasks/handler.go create mode 100644 internal/utils/mapper.go create mode 100644 internal/utils/urls.go create mode 100644 internal/validators/val_impls/body_validator_impl.go create mode 100644 internal/validators/val_impls/request_validator_impl.go create mode 100644 internal/validators/validator.go create mode 100644 main.go create mode 100644 pkg/lysand/action_follow.go create mode 100644 pkg/lysand/action_undo.go create mode 100644 pkg/lysand/actor_user.go create mode 100644 pkg/lysand/attachment.go create mode 100644 pkg/lysand/content_types.go create mode 100644 pkg/lysand/crypto.go create mode 100644 pkg/lysand/crypto_test.go create mode 100644 pkg/lysand/entity.go create mode 100644 pkg/lysand/federation_client.go create mode 100644 pkg/lysand/inbox.go create mode 100644 pkg/lysand/public_key.go create mode 100644 pkg/lysand/public_key_test.go create mode 100644 pkg/lysand/publication.go 
create mode 100644 pkg/lysand/publication_note.go create mode 100644 pkg/lysand/publication_patch.go create mode 100644 pkg/lysand/server_metadata.go create mode 100644 pkg/lysand/signature.go create mode 100644 pkg/lysand/signature_header.go create mode 100644 pkg/lysand/signature_header_test.go create mode 100644 pkg/lysand/time.go create mode 100644 pkg/lysand/url.go create mode 100644 pkg/taskqueue/client.go create mode 100644 pkg/webfinger/host_meta.go create mode 100644 pkg/webfinger/webfinger.go create mode 100755 scripts/ssl-tunnel.sh create mode 100644 web/.env create mode 100644 web/.eslintrc.cjs create mode 100644 web/.gitignore create mode 100644 web/index.html create mode 100644 web/package.json create mode 100644 web/pnpm-lock.yaml create mode 100644 web/postcss.config.js create mode 100644 web/public/vite.svg create mode 100644 web/src/api/auth.ts create mode 100644 web/src/api/index.ts create mode 100644 web/src/api/notes.ts create mode 100644 web/src/api/response.ts create mode 100644 web/src/assets/react.svg create mode 100644 web/src/env.ts create mode 100644 web/src/hooks/useCreateNote.ts create mode 100644 web/src/hooks/useCreateUser.ts create mode 100644 web/src/index.css create mode 100644 web/src/instrument.ts create mode 100644 web/src/main.tsx create mode 100644 web/src/routeTree.gen.ts create mode 100644 web/src/routes/__root.tsx create mode 100644 web/src/routes/index.lazy.tsx create mode 100644 web/src/routes/post.lazy.tsx create mode 100644 web/src/routes/register.lazy.tsx create mode 100644 web/src/routing.ts create mode 100644 web/src/vite-env.d.ts create mode 100644 web/tailwind.config.js create mode 100644 web/tsconfig.app.json create mode 100644 web/tsconfig.json create mode 100644 web/tsconfig.node.json create mode 100644 web/vite.config.ts diff --git a/.env b/.env new file mode 100644 index 0000000..ba031b0 --- /dev/null +++ b/.env @@ -0,0 +1,14 @@ +PUBLIC_ADDRESS="https://localhost" +INSTANCE_NAME="lysand-test" 
+INSTANCE_DESCRIPTION="Versia-Go Instance" + +NATS_URI="nats://localhost:4222" + +# SQLite +DATABASE_URI="file:./versia-go.db?cache=shared&_fk=1" +# PostgreSQL +# DATABASE_URI="postgres://postgres:postgres@localhost:5432/postgres?sslmode=disable" + +ENVIRONMENT="development" +OTLP_ENDPOINT="" +SENTRY_DSN="" diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..315f358 --- /dev/null +++ b/.gitignore @@ -0,0 +1,4 @@ +cert.pem +key.pem +*.db +.env.* \ No newline at end of file diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..15bc112 --- /dev/null +++ b/LICENSE @@ -0,0 +1,662 @@ + + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. + + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. 
+ + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. + + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". 
"Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. + + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. 
+ + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. 
Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. + + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. 
+ + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. + + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. 
This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + <one line to give the program's name and a brief idea of what it does.> + Copyright (C) <year> <name of author> + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see <https://www.gnu.org/licenses/>. + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +<https://www.gnu.org/licenses/>. diff --git a/README.md b/README.md new file mode 100644 index 0000000..3f21416 --- /dev/null +++ b/README.md @@ -0,0 +1,63 @@ +# Versia-Go + +Versia-Go is an experimental implementation of the (not yet renamed :P) [Versia](https://lysand.org) protocol written in +Go. + +> Compatibility level: Lysand 3.1 (sort of) + +> ⚠️ This project is still in development and is not ready for production use. + +## Developing + +### Requirements + +- Go 1.22.5+ +- Docker + Docker Compose v2 + +### Running + +```shell +git clone https://github.com/lysand/versia-go.git +cd versia-go + +docker compose up -d nats + +touch .env.local +# Add the changed variables from .env to .env.local + +go run . +``` + +## TODO + +- [ ] Notes + - [ ] API + - [ ] Allow choosing the publishing user + - [x] Federating notes +- [ ] Follows + - [ ] API + - [x] Automatic follows for public users + - [ ] Unfollows (scheduled for Lysand Working Draft 4) + - [ ] API +- [ ] Users + - [ ] API + - [x] Create user + - [ ] Lysand API + - [x] Get user (from local) + - [x] Webfinger + - [ ] Inbox handling + - [ ] Federated notes + - [ ] Federated unfollows + - [x] Federated follows + - [x] Receiving federated users +- [ ] Web +- Extensions + - [ ] Emojis + +## License + +Versia-Go is licensed under the GNU Affero General Public License v3.0. + +See [LICENSE](LICENSE) for more information. + +> ℹ️ This project might get relicensed to a different license in the future. 
diff --git a/compose.yml b/compose.yml new file mode 100644 index 0000000..dab5c99 --- /dev/null +++ b/compose.yml @@ -0,0 +1,16 @@ +services: + db: + image: postgres:16.2-alpine + environment: + POSTGRES_DB: postgres + POSTGRES_USER: postgres + POSTGRES_PASSWORD: postgres + ports: + - "5432:5432" + + nats: + image: nats:2.9.25-scratch + ports: + - "4222:4222" + - "8222:8222" + command: "--js" diff --git a/config/config.go b/config/config.go new file mode 100644 index 0000000..00d9f83 --- /dev/null +++ b/config/config.go @@ -0,0 +1,55 @@ +package config + +import ( + "net/url" + "os" + + "git.devminer.xyz/devminer/unitel" + "github.com/joho/godotenv" + "github.com/rs/zerolog/log" +) + +type Config struct { + PublicAddress *url.URL + InstanceName string + InstanceDescription *string + + NATSURI string + DatabaseURI string + + Telemetry unitel.Opts +} + +var C Config + +func Load() { + if err := godotenv.Load(".env.local", ".env"); err != nil { + log.Warn().Err(err).Msg("Failed to load .env file") + } + + publicAddress, err := url.Parse(os.Getenv("PUBLIC_ADDRESS")) + if err != nil { + log.Fatal().Err(err).Msg("Failed to parse PUBLIC_ADDRESS") + } + + C = Config{ + PublicAddress: publicAddress, + InstanceName: os.Getenv("INSTANCE_NAME"), + InstanceDescription: optionalEnvStr("INSTANCE_DESCRIPTION"), + + NATSURI: os.Getenv("NATS_URI"), + DatabaseURI: os.Getenv("DATABASE_URI"), + + Telemetry: unitel.ParseOpts("versia-go"), + } + + return +} + +func optionalEnvStr(key string) *string { + value := os.Getenv(key) + if value == "" { + return nil + } + return &value +} diff --git a/ent/attachment.go b/ent/attachment.go new file mode 100644 index 0000000..34ca07d --- /dev/null +++ b/ent/attachment.go @@ -0,0 +1,310 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/attachment" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// Attachment is the model entity for the Attachment schema. +type Attachment struct { + config `json:"-"` + // ID of the ent. + ID uuid.UUID `json:"id,omitempty"` + // IsRemote holds the value of the "isRemote" field. + IsRemote bool `json:"isRemote,omitempty"` + // URI holds the value of the "uri" field. + URI string `json:"uri,omitempty"` + // Extensions holds the value of the "extensions" field. + Extensions lysand.Extensions `json:"extensions,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Description holds the value of the "description" field. + Description string `json:"description,omitempty"` + // Sha256 holds the value of the "sha256" field. + Sha256 []byte `json:"sha256,omitempty"` + // Size holds the value of the "size" field. + Size int `json:"size,omitempty"` + // Blurhash holds the value of the "blurhash" field. + Blurhash *string `json:"blurhash,omitempty"` + // Height holds the value of the "height" field. + Height *int `json:"height,omitempty"` + // Width holds the value of the "width" field. + Width *int `json:"width,omitempty"` + // Fps holds the value of the "fps" field. + Fps *int `json:"fps,omitempty"` + // MimeType holds the value of the "mimeType" field. + MimeType string `json:"mimeType,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the AttachmentQuery when eager-loading is set. 
+ Edges AttachmentEdges `json:"edges"` + attachment_author *uuid.UUID + note_attachments *uuid.UUID + selectValues sql.SelectValues +} + +// AttachmentEdges holds the relations/edges for other nodes in the graph. +type AttachmentEdges struct { + // Author holds the value of the author edge. + Author *User `json:"author,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [1]bool +} + +// AuthorOrErr returns the Author value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e AttachmentEdges) AuthorOrErr() (*User, error) { + if e.Author != nil { + return e.Author, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "author"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Attachment) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case attachment.FieldExtensions, attachment.FieldSha256: + values[i] = new([]byte) + case attachment.FieldIsRemote: + values[i] = new(sql.NullBool) + case attachment.FieldSize, attachment.FieldHeight, attachment.FieldWidth, attachment.FieldFps: + values[i] = new(sql.NullInt64) + case attachment.FieldURI, attachment.FieldDescription, attachment.FieldBlurhash, attachment.FieldMimeType: + values[i] = new(sql.NullString) + case attachment.FieldCreatedAt, attachment.FieldUpdatedAt: + values[i] = new(sql.NullTime) + case attachment.FieldID: + values[i] = new(uuid.UUID) + case attachment.ForeignKeys[0]: // attachment_author + values[i] = &sql.NullScanner{S: new(uuid.UUID)} + case attachment.ForeignKeys[1]: // note_attachments + values[i] = &sql.NullScanner{S: new(uuid.UUID)} + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows 
(after scanning) +// to the Attachment fields. +func (a *Attachment) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case attachment.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + a.ID = *value + } + case attachment.FieldIsRemote: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field isRemote", values[i]) + } else if value.Valid { + a.IsRemote = value.Bool + } + case attachment.FieldURI: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field uri", values[i]) + } else if value.Valid { + a.URI = value.String + } + case attachment.FieldExtensions: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field extensions", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &a.Extensions); err != nil { + return fmt.Errorf("unmarshal field extensions: %w", err) + } + } + case attachment.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + a.CreatedAt = value.Time + } + case attachment.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + a.UpdatedAt = value.Time + } + case attachment.FieldDescription: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field description", values[i]) + } else if value.Valid { + a.Description = value.String + } + case attachment.FieldSha256: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T 
for field sha256", values[i]) + } else if value != nil { + a.Sha256 = *value + } + case attachment.FieldSize: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field size", values[i]) + } else if value.Valid { + a.Size = int(value.Int64) + } + case attachment.FieldBlurhash: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field blurhash", values[i]) + } else if value.Valid { + a.Blurhash = new(string) + *a.Blurhash = value.String + } + case attachment.FieldHeight: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field height", values[i]) + } else if value.Valid { + a.Height = new(int) + *a.Height = int(value.Int64) + } + case attachment.FieldWidth: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field width", values[i]) + } else if value.Valid { + a.Width = new(int) + *a.Width = int(value.Int64) + } + case attachment.FieldFps: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for field fps", values[i]) + } else if value.Valid { + a.Fps = new(int) + *a.Fps = int(value.Int64) + } + case attachment.FieldMimeType: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field mimeType", values[i]) + } else if value.Valid { + a.MimeType = value.String + } + case attachment.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field attachment_author", values[i]) + } else if value.Valid { + a.attachment_author = new(uuid.UUID) + *a.attachment_author = *value.S.(*uuid.UUID) + } + case attachment.ForeignKeys[1]: + if value, ok := values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field note_attachments", values[i]) + } else if value.Valid { + a.note_attachments = new(uuid.UUID) + *a.note_attachments = 
*value.S.(*uuid.UUID) + } + default: + a.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Attachment. +// This includes values selected through modifiers, order, etc. +func (a *Attachment) Value(name string) (ent.Value, error) { + return a.selectValues.Get(name) +} + +// QueryAuthor queries the "author" edge of the Attachment entity. +func (a *Attachment) QueryAuthor() *UserQuery { + return NewAttachmentClient(a.config).QueryAuthor(a) +} + +// Update returns a builder for updating this Attachment. +// Note that you need to call Attachment.Unwrap() before calling this method if this Attachment +// was returned from a transaction, and the transaction was committed or rolled back. +func (a *Attachment) Update() *AttachmentUpdateOne { + return NewAttachmentClient(a.config).UpdateOne(a) +} + +// Unwrap unwraps the Attachment entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (a *Attachment) Unwrap() *Attachment { + _tx, ok := a.config.driver.(*txDriver) + if !ok { + panic("ent: Attachment is not a transactional entity") + } + a.config.driver = _tx.drv + return a +} + +// String implements the fmt.Stringer. 
+func (a *Attachment) String() string { + var builder strings.Builder + builder.WriteString("Attachment(") + builder.WriteString(fmt.Sprintf("id=%v, ", a.ID)) + builder.WriteString("isRemote=") + builder.WriteString(fmt.Sprintf("%v", a.IsRemote)) + builder.WriteString(", ") + builder.WriteString("uri=") + builder.WriteString(a.URI) + builder.WriteString(", ") + builder.WriteString("extensions=") + builder.WriteString(fmt.Sprintf("%v", a.Extensions)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(a.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(a.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("description=") + builder.WriteString(a.Description) + builder.WriteString(", ") + builder.WriteString("sha256=") + builder.WriteString(fmt.Sprintf("%v", a.Sha256)) + builder.WriteString(", ") + builder.WriteString("size=") + builder.WriteString(fmt.Sprintf("%v", a.Size)) + builder.WriteString(", ") + if v := a.Blurhash; v != nil { + builder.WriteString("blurhash=") + builder.WriteString(*v) + } + builder.WriteString(", ") + if v := a.Height; v != nil { + builder.WriteString("height=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := a.Width; v != nil { + builder.WriteString("width=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := a.Fps; v != nil { + builder.WriteString("fps=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + builder.WriteString("mimeType=") + builder.WriteString(a.MimeType) + builder.WriteByte(')') + return builder.String() +} + +// Attachments is a parsable slice of Attachment. 
+type Attachments []*Attachment diff --git a/ent/attachment/attachment.go b/ent/attachment/attachment.go new file mode 100644 index 0000000..060cc8f --- /dev/null +++ b/ent/attachment/attachment.go @@ -0,0 +1,190 @@ +// Code generated by ent, DO NOT EDIT. + +package attachment + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +const ( + // Label holds the string label denoting the attachment type in the database. + Label = "attachment" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldIsRemote holds the string denoting the isremote field in the database. + FieldIsRemote = "is_remote" + // FieldURI holds the string denoting the uri field in the database. + FieldURI = "uri" + // FieldExtensions holds the string denoting the extensions field in the database. + FieldExtensions = "extensions" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldDescription holds the string denoting the description field in the database. + FieldDescription = "description" + // FieldSha256 holds the string denoting the sha256 field in the database. + FieldSha256 = "sha256" + // FieldSize holds the string denoting the size field in the database. + FieldSize = "size" + // FieldBlurhash holds the string denoting the blurhash field in the database. + FieldBlurhash = "blurhash" + // FieldHeight holds the string denoting the height field in the database. + FieldHeight = "height" + // FieldWidth holds the string denoting the width field in the database. + FieldWidth = "width" + // FieldFps holds the string denoting the fps field in the database. + FieldFps = "fps" + // FieldMimeType holds the string denoting the mimetype field in the database. 
+ FieldMimeType = "mime_type" + // EdgeAuthor holds the string denoting the author edge name in mutations. + EdgeAuthor = "author" + // Table holds the table name of the attachment in the database. + Table = "attachments" + // AuthorTable is the table that holds the author relation/edge. + AuthorTable = "attachments" + // AuthorInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + AuthorInverseTable = "users" + // AuthorColumn is the table column denoting the author relation/edge. + AuthorColumn = "attachment_author" +) + +// Columns holds all SQL columns for attachment fields. +var Columns = []string{ + FieldID, + FieldIsRemote, + FieldURI, + FieldExtensions, + FieldCreatedAt, + FieldUpdatedAt, + FieldDescription, + FieldSha256, + FieldSize, + FieldBlurhash, + FieldHeight, + FieldWidth, + FieldFps, + FieldMimeType, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "attachments" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "attachment_author", + "note_attachments", +} + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // URIValidator is a validator for the "uri" field. It is called by the builders before save. + URIValidator func(string) error + // DefaultExtensions holds the default value on creation for the "extensions" field. + DefaultExtensions lysand.Extensions + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. 
+ DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // DescriptionValidator is a validator for the "description" field. It is called by the builders before save. + DescriptionValidator func(string) error + // DefaultID holds the default value on creation for the "id" field. + DefaultID func() uuid.UUID +) + +// OrderOption defines the ordering options for the Attachment queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByIsRemote orders the results by the isRemote field. +func ByIsRemote(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIsRemote, opts...).ToFunc() +} + +// ByURI orders the results by the uri field. +func ByURI(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldURI, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByDescription orders the results by the description field. +func ByDescription(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDescription, opts...).ToFunc() +} + +// BySize orders the results by the size field. +func BySize(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSize, opts...).ToFunc() +} + +// ByBlurhash orders the results by the blurhash field. 
+func ByBlurhash(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBlurhash, opts...).ToFunc() +} + +// ByHeight orders the results by the height field. +func ByHeight(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldHeight, opts...).ToFunc() +} + +// ByWidth orders the results by the width field. +func ByWidth(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldWidth, opts...).ToFunc() +} + +// ByFps orders the results by the fps field. +func ByFps(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFps, opts...).ToFunc() +} + +// ByMimeType orders the results by the mimeType field. +func ByMimeType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMimeType, opts...).ToFunc() +} + +// ByAuthorField orders the results by author field. +func ByAuthorField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAuthorStep(), sql.OrderByField(field, opts...)) + } +} +func newAuthorStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AuthorInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AuthorTable, AuthorColumn), + ) +} diff --git a/ent/attachment/where.go b/ent/attachment/where.go new file mode 100644 index 0000000..3d9b50d --- /dev/null +++ b/ent/attachment/where.go @@ -0,0 +1,745 @@ +// Code generated by ent, DO NOT EDIT. + +package attachment + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. 
+func IDEQ(id uuid.UUID) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id uuid.UUID) predicate.Attachment { + return predicate.Attachment(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.Attachment { + return predicate.Attachment(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...uuid.UUID) predicate.Attachment { + return predicate.Attachment(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id uuid.UUID) predicate.Attachment { + return predicate.Attachment(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.Attachment { + return predicate.Attachment(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.Attachment { + return predicate.Attachment(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id uuid.UUID) predicate.Attachment { + return predicate.Attachment(sql.FieldLTE(FieldID, id)) +} + +// IsRemote applies equality check predicate on the "isRemote" field. It's identical to IsRemoteEQ. +func IsRemote(v bool) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldIsRemote, v)) +} + +// URI applies equality check predicate on the "uri" field. It's identical to URIEQ. +func URI(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldURI, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. 
It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ. +func Description(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldDescription, v)) +} + +// Sha256 applies equality check predicate on the "sha256" field. It's identical to Sha256EQ. +func Sha256(v []byte) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldSha256, v)) +} + +// Size applies equality check predicate on the "size" field. It's identical to SizeEQ. +func Size(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldSize, v)) +} + +// Blurhash applies equality check predicate on the "blurhash" field. It's identical to BlurhashEQ. +func Blurhash(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldBlurhash, v)) +} + +// Height applies equality check predicate on the "height" field. It's identical to HeightEQ. +func Height(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldHeight, v)) +} + +// Width applies equality check predicate on the "width" field. It's identical to WidthEQ. +func Width(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldWidth, v)) +} + +// Fps applies equality check predicate on the "fps" field. It's identical to FpsEQ. +func Fps(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldFps, v)) +} + +// MimeType applies equality check predicate on the "mimeType" field. It's identical to MimeTypeEQ. +func MimeType(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldMimeType, v)) +} + +// IsRemoteEQ applies the EQ predicate on the "isRemote" field. 
+func IsRemoteEQ(v bool) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldIsRemote, v)) +} + +// IsRemoteNEQ applies the NEQ predicate on the "isRemote" field. +func IsRemoteNEQ(v bool) predicate.Attachment { + return predicate.Attachment(sql.FieldNEQ(FieldIsRemote, v)) +} + +// URIEQ applies the EQ predicate on the "uri" field. +func URIEQ(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldURI, v)) +} + +// URINEQ applies the NEQ predicate on the "uri" field. +func URINEQ(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldNEQ(FieldURI, v)) +} + +// URIIn applies the In predicate on the "uri" field. +func URIIn(vs ...string) predicate.Attachment { + return predicate.Attachment(sql.FieldIn(FieldURI, vs...)) +} + +// URINotIn applies the NotIn predicate on the "uri" field. +func URINotIn(vs ...string) predicate.Attachment { + return predicate.Attachment(sql.FieldNotIn(FieldURI, vs...)) +} + +// URIGT applies the GT predicate on the "uri" field. +func URIGT(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldGT(FieldURI, v)) +} + +// URIGTE applies the GTE predicate on the "uri" field. +func URIGTE(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldGTE(FieldURI, v)) +} + +// URILT applies the LT predicate on the "uri" field. +func URILT(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldLT(FieldURI, v)) +} + +// URILTE applies the LTE predicate on the "uri" field. +func URILTE(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldLTE(FieldURI, v)) +} + +// URIContains applies the Contains predicate on the "uri" field. +func URIContains(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldContains(FieldURI, v)) +} + +// URIHasPrefix applies the HasPrefix predicate on the "uri" field. 
+func URIHasPrefix(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldHasPrefix(FieldURI, v)) +} + +// URIHasSuffix applies the HasSuffix predicate on the "uri" field. +func URIHasSuffix(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldHasSuffix(FieldURI, v)) +} + +// URIEqualFold applies the EqualFold predicate on the "uri" field. +func URIEqualFold(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldEqualFold(FieldURI, v)) +} + +// URIContainsFold applies the ContainsFold predicate on the "uri" field. +func URIContainsFold(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldContainsFold(FieldURI, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. 
+func CreatedAtLT(v time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Attachment { + return predicate.Attachment(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// DescriptionEQ applies the EQ predicate on the "description" field. 
+func DescriptionEQ(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldDescription, v)) +} + +// DescriptionNEQ applies the NEQ predicate on the "description" field. +func DescriptionNEQ(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldNEQ(FieldDescription, v)) +} + +// DescriptionIn applies the In predicate on the "description" field. +func DescriptionIn(vs ...string) predicate.Attachment { + return predicate.Attachment(sql.FieldIn(FieldDescription, vs...)) +} + +// DescriptionNotIn applies the NotIn predicate on the "description" field. +func DescriptionNotIn(vs ...string) predicate.Attachment { + return predicate.Attachment(sql.FieldNotIn(FieldDescription, vs...)) +} + +// DescriptionGT applies the GT predicate on the "description" field. +func DescriptionGT(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldGT(FieldDescription, v)) +} + +// DescriptionGTE applies the GTE predicate on the "description" field. +func DescriptionGTE(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldGTE(FieldDescription, v)) +} + +// DescriptionLT applies the LT predicate on the "description" field. +func DescriptionLT(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldLT(FieldDescription, v)) +} + +// DescriptionLTE applies the LTE predicate on the "description" field. +func DescriptionLTE(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldLTE(FieldDescription, v)) +} + +// DescriptionContains applies the Contains predicate on the "description" field. +func DescriptionContains(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldContains(FieldDescription, v)) +} + +// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. 
+func DescriptionHasPrefix(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldHasPrefix(FieldDescription, v)) +} + +// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. +func DescriptionHasSuffix(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldHasSuffix(FieldDescription, v)) +} + +// DescriptionEqualFold applies the EqualFold predicate on the "description" field. +func DescriptionEqualFold(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldEqualFold(FieldDescription, v)) +} + +// DescriptionContainsFold applies the ContainsFold predicate on the "description" field. +func DescriptionContainsFold(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldContainsFold(FieldDescription, v)) +} + +// Sha256EQ applies the EQ predicate on the "sha256" field. +func Sha256EQ(v []byte) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldSha256, v)) +} + +// Sha256NEQ applies the NEQ predicate on the "sha256" field. +func Sha256NEQ(v []byte) predicate.Attachment { + return predicate.Attachment(sql.FieldNEQ(FieldSha256, v)) +} + +// Sha256In applies the In predicate on the "sha256" field. +func Sha256In(vs ...[]byte) predicate.Attachment { + return predicate.Attachment(sql.FieldIn(FieldSha256, vs...)) +} + +// Sha256NotIn applies the NotIn predicate on the "sha256" field. +func Sha256NotIn(vs ...[]byte) predicate.Attachment { + return predicate.Attachment(sql.FieldNotIn(FieldSha256, vs...)) +} + +// Sha256GT applies the GT predicate on the "sha256" field. +func Sha256GT(v []byte) predicate.Attachment { + return predicate.Attachment(sql.FieldGT(FieldSha256, v)) +} + +// Sha256GTE applies the GTE predicate on the "sha256" field. +func Sha256GTE(v []byte) predicate.Attachment { + return predicate.Attachment(sql.FieldGTE(FieldSha256, v)) +} + +// Sha256LT applies the LT predicate on the "sha256" field. 
+func Sha256LT(v []byte) predicate.Attachment { + return predicate.Attachment(sql.FieldLT(FieldSha256, v)) +} + +// Sha256LTE applies the LTE predicate on the "sha256" field. +func Sha256LTE(v []byte) predicate.Attachment { + return predicate.Attachment(sql.FieldLTE(FieldSha256, v)) +} + +// SizeEQ applies the EQ predicate on the "size" field. +func SizeEQ(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldSize, v)) +} + +// SizeNEQ applies the NEQ predicate on the "size" field. +func SizeNEQ(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldNEQ(FieldSize, v)) +} + +// SizeIn applies the In predicate on the "size" field. +func SizeIn(vs ...int) predicate.Attachment { + return predicate.Attachment(sql.FieldIn(FieldSize, vs...)) +} + +// SizeNotIn applies the NotIn predicate on the "size" field. +func SizeNotIn(vs ...int) predicate.Attachment { + return predicate.Attachment(sql.FieldNotIn(FieldSize, vs...)) +} + +// SizeGT applies the GT predicate on the "size" field. +func SizeGT(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldGT(FieldSize, v)) +} + +// SizeGTE applies the GTE predicate on the "size" field. +func SizeGTE(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldGTE(FieldSize, v)) +} + +// SizeLT applies the LT predicate on the "size" field. +func SizeLT(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldLT(FieldSize, v)) +} + +// SizeLTE applies the LTE predicate on the "size" field. +func SizeLTE(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldLTE(FieldSize, v)) +} + +// BlurhashEQ applies the EQ predicate on the "blurhash" field. +func BlurhashEQ(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldBlurhash, v)) +} + +// BlurhashNEQ applies the NEQ predicate on the "blurhash" field. 
+func BlurhashNEQ(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldNEQ(FieldBlurhash, v)) +} + +// BlurhashIn applies the In predicate on the "blurhash" field. +func BlurhashIn(vs ...string) predicate.Attachment { + return predicate.Attachment(sql.FieldIn(FieldBlurhash, vs...)) +} + +// BlurhashNotIn applies the NotIn predicate on the "blurhash" field. +func BlurhashNotIn(vs ...string) predicate.Attachment { + return predicate.Attachment(sql.FieldNotIn(FieldBlurhash, vs...)) +} + +// BlurhashGT applies the GT predicate on the "blurhash" field. +func BlurhashGT(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldGT(FieldBlurhash, v)) +} + +// BlurhashGTE applies the GTE predicate on the "blurhash" field. +func BlurhashGTE(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldGTE(FieldBlurhash, v)) +} + +// BlurhashLT applies the LT predicate on the "blurhash" field. +func BlurhashLT(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldLT(FieldBlurhash, v)) +} + +// BlurhashLTE applies the LTE predicate on the "blurhash" field. +func BlurhashLTE(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldLTE(FieldBlurhash, v)) +} + +// BlurhashContains applies the Contains predicate on the "blurhash" field. +func BlurhashContains(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldContains(FieldBlurhash, v)) +} + +// BlurhashHasPrefix applies the HasPrefix predicate on the "blurhash" field. +func BlurhashHasPrefix(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldHasPrefix(FieldBlurhash, v)) +} + +// BlurhashHasSuffix applies the HasSuffix predicate on the "blurhash" field. +func BlurhashHasSuffix(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldHasSuffix(FieldBlurhash, v)) +} + +// BlurhashIsNil applies the IsNil predicate on the "blurhash" field. 
+func BlurhashIsNil() predicate.Attachment { + return predicate.Attachment(sql.FieldIsNull(FieldBlurhash)) +} + +// BlurhashNotNil applies the NotNil predicate on the "blurhash" field. +func BlurhashNotNil() predicate.Attachment { + return predicate.Attachment(sql.FieldNotNull(FieldBlurhash)) +} + +// BlurhashEqualFold applies the EqualFold predicate on the "blurhash" field. +func BlurhashEqualFold(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldEqualFold(FieldBlurhash, v)) +} + +// BlurhashContainsFold applies the ContainsFold predicate on the "blurhash" field. +func BlurhashContainsFold(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldContainsFold(FieldBlurhash, v)) +} + +// HeightEQ applies the EQ predicate on the "height" field. +func HeightEQ(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldHeight, v)) +} + +// HeightNEQ applies the NEQ predicate on the "height" field. +func HeightNEQ(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldNEQ(FieldHeight, v)) +} + +// HeightIn applies the In predicate on the "height" field. +func HeightIn(vs ...int) predicate.Attachment { + return predicate.Attachment(sql.FieldIn(FieldHeight, vs...)) +} + +// HeightNotIn applies the NotIn predicate on the "height" field. +func HeightNotIn(vs ...int) predicate.Attachment { + return predicate.Attachment(sql.FieldNotIn(FieldHeight, vs...)) +} + +// HeightGT applies the GT predicate on the "height" field. +func HeightGT(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldGT(FieldHeight, v)) +} + +// HeightGTE applies the GTE predicate on the "height" field. +func HeightGTE(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldGTE(FieldHeight, v)) +} + +// HeightLT applies the LT predicate on the "height" field. 
+func HeightLT(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldLT(FieldHeight, v)) +} + +// HeightLTE applies the LTE predicate on the "height" field. +func HeightLTE(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldLTE(FieldHeight, v)) +} + +// HeightIsNil applies the IsNil predicate on the "height" field. +func HeightIsNil() predicate.Attachment { + return predicate.Attachment(sql.FieldIsNull(FieldHeight)) +} + +// HeightNotNil applies the NotNil predicate on the "height" field. +func HeightNotNil() predicate.Attachment { + return predicate.Attachment(sql.FieldNotNull(FieldHeight)) +} + +// WidthEQ applies the EQ predicate on the "width" field. +func WidthEQ(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldWidth, v)) +} + +// WidthNEQ applies the NEQ predicate on the "width" field. +func WidthNEQ(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldNEQ(FieldWidth, v)) +} + +// WidthIn applies the In predicate on the "width" field. +func WidthIn(vs ...int) predicate.Attachment { + return predicate.Attachment(sql.FieldIn(FieldWidth, vs...)) +} + +// WidthNotIn applies the NotIn predicate on the "width" field. +func WidthNotIn(vs ...int) predicate.Attachment { + return predicate.Attachment(sql.FieldNotIn(FieldWidth, vs...)) +} + +// WidthGT applies the GT predicate on the "width" field. +func WidthGT(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldGT(FieldWidth, v)) +} + +// WidthGTE applies the GTE predicate on the "width" field. +func WidthGTE(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldGTE(FieldWidth, v)) +} + +// WidthLT applies the LT predicate on the "width" field. +func WidthLT(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldLT(FieldWidth, v)) +} + +// WidthLTE applies the LTE predicate on the "width" field. 
+func WidthLTE(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldLTE(FieldWidth, v)) +} + +// WidthIsNil applies the IsNil predicate on the "width" field. +func WidthIsNil() predicate.Attachment { + return predicate.Attachment(sql.FieldIsNull(FieldWidth)) +} + +// WidthNotNil applies the NotNil predicate on the "width" field. +func WidthNotNil() predicate.Attachment { + return predicate.Attachment(sql.FieldNotNull(FieldWidth)) +} + +// FpsEQ applies the EQ predicate on the "fps" field. +func FpsEQ(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldFps, v)) +} + +// FpsNEQ applies the NEQ predicate on the "fps" field. +func FpsNEQ(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldNEQ(FieldFps, v)) +} + +// FpsIn applies the In predicate on the "fps" field. +func FpsIn(vs ...int) predicate.Attachment { + return predicate.Attachment(sql.FieldIn(FieldFps, vs...)) +} + +// FpsNotIn applies the NotIn predicate on the "fps" field. +func FpsNotIn(vs ...int) predicate.Attachment { + return predicate.Attachment(sql.FieldNotIn(FieldFps, vs...)) +} + +// FpsGT applies the GT predicate on the "fps" field. +func FpsGT(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldGT(FieldFps, v)) +} + +// FpsGTE applies the GTE predicate on the "fps" field. +func FpsGTE(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldGTE(FieldFps, v)) +} + +// FpsLT applies the LT predicate on the "fps" field. +func FpsLT(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldLT(FieldFps, v)) +} + +// FpsLTE applies the LTE predicate on the "fps" field. +func FpsLTE(v int) predicate.Attachment { + return predicate.Attachment(sql.FieldLTE(FieldFps, v)) +} + +// FpsIsNil applies the IsNil predicate on the "fps" field. +func FpsIsNil() predicate.Attachment { + return predicate.Attachment(sql.FieldIsNull(FieldFps)) +} + +// FpsNotNil applies the NotNil predicate on the "fps" field. 
+func FpsNotNil() predicate.Attachment { + return predicate.Attachment(sql.FieldNotNull(FieldFps)) +} + +// MimeTypeEQ applies the EQ predicate on the "mimeType" field. +func MimeTypeEQ(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldEQ(FieldMimeType, v)) +} + +// MimeTypeNEQ applies the NEQ predicate on the "mimeType" field. +func MimeTypeNEQ(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldNEQ(FieldMimeType, v)) +} + +// MimeTypeIn applies the In predicate on the "mimeType" field. +func MimeTypeIn(vs ...string) predicate.Attachment { + return predicate.Attachment(sql.FieldIn(FieldMimeType, vs...)) +} + +// MimeTypeNotIn applies the NotIn predicate on the "mimeType" field. +func MimeTypeNotIn(vs ...string) predicate.Attachment { + return predicate.Attachment(sql.FieldNotIn(FieldMimeType, vs...)) +} + +// MimeTypeGT applies the GT predicate on the "mimeType" field. +func MimeTypeGT(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldGT(FieldMimeType, v)) +} + +// MimeTypeGTE applies the GTE predicate on the "mimeType" field. +func MimeTypeGTE(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldGTE(FieldMimeType, v)) +} + +// MimeTypeLT applies the LT predicate on the "mimeType" field. +func MimeTypeLT(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldLT(FieldMimeType, v)) +} + +// MimeTypeLTE applies the LTE predicate on the "mimeType" field. +func MimeTypeLTE(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldLTE(FieldMimeType, v)) +} + +// MimeTypeContains applies the Contains predicate on the "mimeType" field. +func MimeTypeContains(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldContains(FieldMimeType, v)) +} + +// MimeTypeHasPrefix applies the HasPrefix predicate on the "mimeType" field. 
+func MimeTypeHasPrefix(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldHasPrefix(FieldMimeType, v)) +} + +// MimeTypeHasSuffix applies the HasSuffix predicate on the "mimeType" field. +func MimeTypeHasSuffix(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldHasSuffix(FieldMimeType, v)) +} + +// MimeTypeEqualFold applies the EqualFold predicate on the "mimeType" field. +func MimeTypeEqualFold(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldEqualFold(FieldMimeType, v)) +} + +// MimeTypeContainsFold applies the ContainsFold predicate on the "mimeType" field. +func MimeTypeContainsFold(v string) predicate.Attachment { + return predicate.Attachment(sql.FieldContainsFold(FieldMimeType, v)) +} + +// HasAuthor applies the HasEdge predicate on the "author" edge. +func HasAuthor() predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AuthorTable, AuthorColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAuthorWith applies the HasEdge predicate on the "author" edge with a given conditions (other predicates). +func HasAuthorWith(preds ...predicate.User) predicate.Attachment { + return predicate.Attachment(func(s *sql.Selector) { + step := newAuthorStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Attachment) predicate.Attachment { + return predicate.Attachment(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Attachment) predicate.Attachment { + return predicate.Attachment(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.Attachment) predicate.Attachment { + return predicate.Attachment(sql.NotPredicates(p)) +} diff --git a/ent/attachment_create.go b/ent/attachment_create.go new file mode 100644 index 0000000..d60993f --- /dev/null +++ b/ent/attachment_create.go @@ -0,0 +1,1362 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/attachment" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// AttachmentCreate is the builder for creating a Attachment entity. +type AttachmentCreate struct { + config + mutation *AttachmentMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetIsRemote sets the "isRemote" field. +func (ac *AttachmentCreate) SetIsRemote(b bool) *AttachmentCreate { + ac.mutation.SetIsRemote(b) + return ac +} + +// SetURI sets the "uri" field. +func (ac *AttachmentCreate) SetURI(s string) *AttachmentCreate { + ac.mutation.SetURI(s) + return ac +} + +// SetExtensions sets the "extensions" field. +func (ac *AttachmentCreate) SetExtensions(l lysand.Extensions) *AttachmentCreate { + ac.mutation.SetExtensions(l) + return ac +} + +// SetCreatedAt sets the "created_at" field. +func (ac *AttachmentCreate) SetCreatedAt(t time.Time) *AttachmentCreate { + ac.mutation.SetCreatedAt(t) + return ac +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (ac *AttachmentCreate) SetNillableCreatedAt(t *time.Time) *AttachmentCreate { + if t != nil { + ac.SetCreatedAt(*t) + } + return ac +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (ac *AttachmentCreate) SetUpdatedAt(t time.Time) *AttachmentCreate { + ac.mutation.SetUpdatedAt(t) + return ac +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (ac *AttachmentCreate) SetNillableUpdatedAt(t *time.Time) *AttachmentCreate { + if t != nil { + ac.SetUpdatedAt(*t) + } + return ac +} + +// SetDescription sets the "description" field. +func (ac *AttachmentCreate) SetDescription(s string) *AttachmentCreate { + ac.mutation.SetDescription(s) + return ac +} + +// SetSha256 sets the "sha256" field. +func (ac *AttachmentCreate) SetSha256(b []byte) *AttachmentCreate { + ac.mutation.SetSha256(b) + return ac +} + +// SetSize sets the "size" field. +func (ac *AttachmentCreate) SetSize(i int) *AttachmentCreate { + ac.mutation.SetSize(i) + return ac +} + +// SetBlurhash sets the "blurhash" field. +func (ac *AttachmentCreate) SetBlurhash(s string) *AttachmentCreate { + ac.mutation.SetBlurhash(s) + return ac +} + +// SetNillableBlurhash sets the "blurhash" field if the given value is not nil. +func (ac *AttachmentCreate) SetNillableBlurhash(s *string) *AttachmentCreate { + if s != nil { + ac.SetBlurhash(*s) + } + return ac +} + +// SetHeight sets the "height" field. +func (ac *AttachmentCreate) SetHeight(i int) *AttachmentCreate { + ac.mutation.SetHeight(i) + return ac +} + +// SetNillableHeight sets the "height" field if the given value is not nil. +func (ac *AttachmentCreate) SetNillableHeight(i *int) *AttachmentCreate { + if i != nil { + ac.SetHeight(*i) + } + return ac +} + +// SetWidth sets the "width" field. +func (ac *AttachmentCreate) SetWidth(i int) *AttachmentCreate { + ac.mutation.SetWidth(i) + return ac +} + +// SetNillableWidth sets the "width" field if the given value is not nil. +func (ac *AttachmentCreate) SetNillableWidth(i *int) *AttachmentCreate { + if i != nil { + ac.SetWidth(*i) + } + return ac +} + +// SetFps sets the "fps" field. 
+func (ac *AttachmentCreate) SetFps(i int) *AttachmentCreate { + ac.mutation.SetFps(i) + return ac +} + +// SetNillableFps sets the "fps" field if the given value is not nil. +func (ac *AttachmentCreate) SetNillableFps(i *int) *AttachmentCreate { + if i != nil { + ac.SetFps(*i) + } + return ac +} + +// SetMimeType sets the "mimeType" field. +func (ac *AttachmentCreate) SetMimeType(s string) *AttachmentCreate { + ac.mutation.SetMimeType(s) + return ac +} + +// SetID sets the "id" field. +func (ac *AttachmentCreate) SetID(u uuid.UUID) *AttachmentCreate { + ac.mutation.SetID(u) + return ac +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (ac *AttachmentCreate) SetNillableID(u *uuid.UUID) *AttachmentCreate { + if u != nil { + ac.SetID(*u) + } + return ac +} + +// SetAuthorID sets the "author" edge to the User entity by ID. +func (ac *AttachmentCreate) SetAuthorID(id uuid.UUID) *AttachmentCreate { + ac.mutation.SetAuthorID(id) + return ac +} + +// SetAuthor sets the "author" edge to the User entity. +func (ac *AttachmentCreate) SetAuthor(u *User) *AttachmentCreate { + return ac.SetAuthorID(u.ID) +} + +// Mutation returns the AttachmentMutation object of the builder. +func (ac *AttachmentCreate) Mutation() *AttachmentMutation { + return ac.mutation +} + +// Save creates the Attachment in the database. +func (ac *AttachmentCreate) Save(ctx context.Context) (*Attachment, error) { + ac.defaults() + return withHooks(ctx, ac.sqlSave, ac.mutation, ac.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (ac *AttachmentCreate) SaveX(ctx context.Context) *Attachment { + v, err := ac.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ac *AttachmentCreate) Exec(ctx context.Context) error { + _, err := ac.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (ac *AttachmentCreate) ExecX(ctx context.Context) { + if err := ac.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (ac *AttachmentCreate) defaults() { + if _, ok := ac.mutation.Extensions(); !ok { + v := attachment.DefaultExtensions + ac.mutation.SetExtensions(v) + } + if _, ok := ac.mutation.CreatedAt(); !ok { + v := attachment.DefaultCreatedAt() + ac.mutation.SetCreatedAt(v) + } + if _, ok := ac.mutation.UpdatedAt(); !ok { + v := attachment.DefaultUpdatedAt() + ac.mutation.SetUpdatedAt(v) + } + if _, ok := ac.mutation.ID(); !ok { + v := attachment.DefaultID() + ac.mutation.SetID(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (ac *AttachmentCreate) check() error { + if _, ok := ac.mutation.IsRemote(); !ok { + return &ValidationError{Name: "isRemote", err: errors.New(`ent: missing required field "Attachment.isRemote"`)} + } + if _, ok := ac.mutation.URI(); !ok { + return &ValidationError{Name: "uri", err: errors.New(`ent: missing required field "Attachment.uri"`)} + } + if v, ok := ac.mutation.URI(); ok { + if err := attachment.URIValidator(v); err != nil { + return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "Attachment.uri": %w`, err)} + } + } + if _, ok := ac.mutation.Extensions(); !ok { + return &ValidationError{Name: "extensions", err: errors.New(`ent: missing required field "Attachment.extensions"`)} + } + if _, ok := ac.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Attachment.created_at"`)} + } + if _, ok := ac.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Attachment.updated_at"`)} + } + if _, ok := ac.mutation.Description(); !ok { + return &ValidationError{Name: "description", err: errors.New(`ent: missing required field "Attachment.description"`)} + } + if v, ok 
:= ac.mutation.Description(); ok { + if err := attachment.DescriptionValidator(v); err != nil { + return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "Attachment.description": %w`, err)} + } + } + if _, ok := ac.mutation.Sha256(); !ok { + return &ValidationError{Name: "sha256", err: errors.New(`ent: missing required field "Attachment.sha256"`)} + } + if _, ok := ac.mutation.Size(); !ok { + return &ValidationError{Name: "size", err: errors.New(`ent: missing required field "Attachment.size"`)} + } + if _, ok := ac.mutation.MimeType(); !ok { + return &ValidationError{Name: "mimeType", err: errors.New(`ent: missing required field "Attachment.mimeType"`)} + } + if _, ok := ac.mutation.AuthorID(); !ok { + return &ValidationError{Name: "author", err: errors.New(`ent: missing required edge "Attachment.author"`)} + } + return nil +} + +func (ac *AttachmentCreate) sqlSave(ctx context.Context) (*Attachment, error) { + if err := ac.check(); err != nil { + return nil, err + } + _node, _spec := ac.createSpec() + if err := sqlgraph.CreateNode(ctx, ac.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + ac.mutation.id = &_node.ID + ac.mutation.done = true + return _node, nil +} + +func (ac *AttachmentCreate) createSpec() (*Attachment, *sqlgraph.CreateSpec) { + var ( + _node = &Attachment{config: ac.config} + _spec = sqlgraph.NewCreateSpec(attachment.Table, sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID)) + ) + _spec.OnConflict = ac.conflict + if id, ok := ac.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = &id + } + if value, ok := ac.mutation.IsRemote(); ok { + _spec.SetField(attachment.FieldIsRemote, field.TypeBool, value) + _node.IsRemote = 
value + } + if value, ok := ac.mutation.URI(); ok { + _spec.SetField(attachment.FieldURI, field.TypeString, value) + _node.URI = value + } + if value, ok := ac.mutation.Extensions(); ok { + _spec.SetField(attachment.FieldExtensions, field.TypeJSON, value) + _node.Extensions = value + } + if value, ok := ac.mutation.CreatedAt(); ok { + _spec.SetField(attachment.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := ac.mutation.UpdatedAt(); ok { + _spec.SetField(attachment.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := ac.mutation.Description(); ok { + _spec.SetField(attachment.FieldDescription, field.TypeString, value) + _node.Description = value + } + if value, ok := ac.mutation.Sha256(); ok { + _spec.SetField(attachment.FieldSha256, field.TypeBytes, value) + _node.Sha256 = value + } + if value, ok := ac.mutation.Size(); ok { + _spec.SetField(attachment.FieldSize, field.TypeInt, value) + _node.Size = value + } + if value, ok := ac.mutation.Blurhash(); ok { + _spec.SetField(attachment.FieldBlurhash, field.TypeString, value) + _node.Blurhash = &value + } + if value, ok := ac.mutation.Height(); ok { + _spec.SetField(attachment.FieldHeight, field.TypeInt, value) + _node.Height = &value + } + if value, ok := ac.mutation.Width(); ok { + _spec.SetField(attachment.FieldWidth, field.TypeInt, value) + _node.Width = &value + } + if value, ok := ac.mutation.Fps(); ok { + _spec.SetField(attachment.FieldFps, field.TypeInt, value) + _node.Fps = &value + } + if value, ok := ac.mutation.MimeType(); ok { + _spec.SetField(attachment.FieldMimeType, field.TypeString, value) + _node.MimeType = value + } + if nodes := ac.mutation.AuthorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: attachment.AuthorTable, + Columns: []string{attachment.AuthorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, 
+ } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.attachment_author = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Attachment.Create(). +// SetIsRemote(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.AttachmentUpsert) { +// SetIsRemote(v+v). +// }). +// Exec(ctx) +func (ac *AttachmentCreate) OnConflict(opts ...sql.ConflictOption) *AttachmentUpsertOne { + ac.conflict = opts + return &AttachmentUpsertOne{ + create: ac, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Attachment.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (ac *AttachmentCreate) OnConflictColumns(columns ...string) *AttachmentUpsertOne { + ac.conflict = append(ac.conflict, sql.ConflictColumns(columns...)) + return &AttachmentUpsertOne{ + create: ac, + } +} + +type ( + // AttachmentUpsertOne is the builder for "upsert"-ing + // one Attachment node. + AttachmentUpsertOne struct { + create *AttachmentCreate + } + + // AttachmentUpsert is the "OnConflict" setter. + AttachmentUpsert struct { + *sql.UpdateSet + } +) + +// SetIsRemote sets the "isRemote" field. +func (u *AttachmentUpsert) SetIsRemote(v bool) *AttachmentUpsert { + u.Set(attachment.FieldIsRemote, v) + return u +} + +// UpdateIsRemote sets the "isRemote" field to the value that was provided on create. +func (u *AttachmentUpsert) UpdateIsRemote() *AttachmentUpsert { + u.SetExcluded(attachment.FieldIsRemote) + return u +} + +// SetURI sets the "uri" field. 
+func (u *AttachmentUpsert) SetURI(v string) *AttachmentUpsert { + u.Set(attachment.FieldURI, v) + return u +} + +// UpdateURI sets the "uri" field to the value that was provided on create. +func (u *AttachmentUpsert) UpdateURI() *AttachmentUpsert { + u.SetExcluded(attachment.FieldURI) + return u +} + +// SetExtensions sets the "extensions" field. +func (u *AttachmentUpsert) SetExtensions(v lysand.Extensions) *AttachmentUpsert { + u.Set(attachment.FieldExtensions, v) + return u +} + +// UpdateExtensions sets the "extensions" field to the value that was provided on create. +func (u *AttachmentUpsert) UpdateExtensions() *AttachmentUpsert { + u.SetExcluded(attachment.FieldExtensions) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *AttachmentUpsert) SetUpdatedAt(v time.Time) *AttachmentUpsert { + u.Set(attachment.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *AttachmentUpsert) UpdateUpdatedAt() *AttachmentUpsert { + u.SetExcluded(attachment.FieldUpdatedAt) + return u +} + +// SetDescription sets the "description" field. +func (u *AttachmentUpsert) SetDescription(v string) *AttachmentUpsert { + u.Set(attachment.FieldDescription, v) + return u +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *AttachmentUpsert) UpdateDescription() *AttachmentUpsert { + u.SetExcluded(attachment.FieldDescription) + return u +} + +// SetSha256 sets the "sha256" field. +func (u *AttachmentUpsert) SetSha256(v []byte) *AttachmentUpsert { + u.Set(attachment.FieldSha256, v) + return u +} + +// UpdateSha256 sets the "sha256" field to the value that was provided on create. +func (u *AttachmentUpsert) UpdateSha256() *AttachmentUpsert { + u.SetExcluded(attachment.FieldSha256) + return u +} + +// SetSize sets the "size" field. 
+func (u *AttachmentUpsert) SetSize(v int) *AttachmentUpsert { + u.Set(attachment.FieldSize, v) + return u +} + +// UpdateSize sets the "size" field to the value that was provided on create. +func (u *AttachmentUpsert) UpdateSize() *AttachmentUpsert { + u.SetExcluded(attachment.FieldSize) + return u +} + +// AddSize adds v to the "size" field. +func (u *AttachmentUpsert) AddSize(v int) *AttachmentUpsert { + u.Add(attachment.FieldSize, v) + return u +} + +// SetBlurhash sets the "blurhash" field. +func (u *AttachmentUpsert) SetBlurhash(v string) *AttachmentUpsert { + u.Set(attachment.FieldBlurhash, v) + return u +} + +// UpdateBlurhash sets the "blurhash" field to the value that was provided on create. +func (u *AttachmentUpsert) UpdateBlurhash() *AttachmentUpsert { + u.SetExcluded(attachment.FieldBlurhash) + return u +} + +// ClearBlurhash clears the value of the "blurhash" field. +func (u *AttachmentUpsert) ClearBlurhash() *AttachmentUpsert { + u.SetNull(attachment.FieldBlurhash) + return u +} + +// SetHeight sets the "height" field. +func (u *AttachmentUpsert) SetHeight(v int) *AttachmentUpsert { + u.Set(attachment.FieldHeight, v) + return u +} + +// UpdateHeight sets the "height" field to the value that was provided on create. +func (u *AttachmentUpsert) UpdateHeight() *AttachmentUpsert { + u.SetExcluded(attachment.FieldHeight) + return u +} + +// AddHeight adds v to the "height" field. +func (u *AttachmentUpsert) AddHeight(v int) *AttachmentUpsert { + u.Add(attachment.FieldHeight, v) + return u +} + +// ClearHeight clears the value of the "height" field. +func (u *AttachmentUpsert) ClearHeight() *AttachmentUpsert { + u.SetNull(attachment.FieldHeight) + return u +} + +// SetWidth sets the "width" field. +func (u *AttachmentUpsert) SetWidth(v int) *AttachmentUpsert { + u.Set(attachment.FieldWidth, v) + return u +} + +// UpdateWidth sets the "width" field to the value that was provided on create. 
+func (u *AttachmentUpsert) UpdateWidth() *AttachmentUpsert { + u.SetExcluded(attachment.FieldWidth) + return u +} + +// AddWidth adds v to the "width" field. +func (u *AttachmentUpsert) AddWidth(v int) *AttachmentUpsert { + u.Add(attachment.FieldWidth, v) + return u +} + +// ClearWidth clears the value of the "width" field. +func (u *AttachmentUpsert) ClearWidth() *AttachmentUpsert { + u.SetNull(attachment.FieldWidth) + return u +} + +// SetFps sets the "fps" field. +func (u *AttachmentUpsert) SetFps(v int) *AttachmentUpsert { + u.Set(attachment.FieldFps, v) + return u +} + +// UpdateFps sets the "fps" field to the value that was provided on create. +func (u *AttachmentUpsert) UpdateFps() *AttachmentUpsert { + u.SetExcluded(attachment.FieldFps) + return u +} + +// AddFps adds v to the "fps" field. +func (u *AttachmentUpsert) AddFps(v int) *AttachmentUpsert { + u.Add(attachment.FieldFps, v) + return u +} + +// ClearFps clears the value of the "fps" field. +func (u *AttachmentUpsert) ClearFps() *AttachmentUpsert { + u.SetNull(attachment.FieldFps) + return u +} + +// SetMimeType sets the "mimeType" field. +func (u *AttachmentUpsert) SetMimeType(v string) *AttachmentUpsert { + u.Set(attachment.FieldMimeType, v) + return u +} + +// UpdateMimeType sets the "mimeType" field to the value that was provided on create. +func (u *AttachmentUpsert) UpdateMimeType() *AttachmentUpsert { + u.SetExcluded(attachment.FieldMimeType) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. +// Using this option is equivalent to using: +// +// client.Attachment.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(attachment.FieldID) +// }), +// ). 
+// Exec(ctx) +func (u *AttachmentUpsertOne) UpdateNewValues() *AttachmentUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.ID(); exists { + s.SetIgnore(attachment.FieldID) + } + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(attachment.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Attachment.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AttachmentUpsertOne) Ignore() *AttachmentUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AttachmentUpsertOne) DoNothing() *AttachmentUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AttachmentCreate.OnConflict +// documentation for more info. +func (u *AttachmentUpsertOne) Update(set func(*AttachmentUpsert)) *AttachmentUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AttachmentUpsert{UpdateSet: update}) + })) + return u +} + +// SetIsRemote sets the "isRemote" field. +func (u *AttachmentUpsertOne) SetIsRemote(v bool) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.SetIsRemote(v) + }) +} + +// UpdateIsRemote sets the "isRemote" field to the value that was provided on create. +func (u *AttachmentUpsertOne) UpdateIsRemote() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateIsRemote() + }) +} + +// SetURI sets the "uri" field. 
+func (u *AttachmentUpsertOne) SetURI(v string) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.SetURI(v) + }) +} + +// UpdateURI sets the "uri" field to the value that was provided on create. +func (u *AttachmentUpsertOne) UpdateURI() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateURI() + }) +} + +// SetExtensions sets the "extensions" field. +func (u *AttachmentUpsertOne) SetExtensions(v lysand.Extensions) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.SetExtensions(v) + }) +} + +// UpdateExtensions sets the "extensions" field to the value that was provided on create. +func (u *AttachmentUpsertOne) UpdateExtensions() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateExtensions() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *AttachmentUpsertOne) SetUpdatedAt(v time.Time) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *AttachmentUpsertOne) UpdateUpdatedAt() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDescription sets the "description" field. +func (u *AttachmentUpsertOne) SetDescription(v string) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *AttachmentUpsertOne) UpdateDescription() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateDescription() + }) +} + +// SetSha256 sets the "sha256" field. +func (u *AttachmentUpsertOne) SetSha256(v []byte) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.SetSha256(v) + }) +} + +// UpdateSha256 sets the "sha256" field to the value that was provided on create. 
+func (u *AttachmentUpsertOne) UpdateSha256() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateSha256() + }) +} + +// SetSize sets the "size" field. +func (u *AttachmentUpsertOne) SetSize(v int) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.SetSize(v) + }) +} + +// AddSize adds v to the "size" field. +func (u *AttachmentUpsertOne) AddSize(v int) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.AddSize(v) + }) +} + +// UpdateSize sets the "size" field to the value that was provided on create. +func (u *AttachmentUpsertOne) UpdateSize() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateSize() + }) +} + +// SetBlurhash sets the "blurhash" field. +func (u *AttachmentUpsertOne) SetBlurhash(v string) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.SetBlurhash(v) + }) +} + +// UpdateBlurhash sets the "blurhash" field to the value that was provided on create. +func (u *AttachmentUpsertOne) UpdateBlurhash() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateBlurhash() + }) +} + +// ClearBlurhash clears the value of the "blurhash" field. +func (u *AttachmentUpsertOne) ClearBlurhash() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.ClearBlurhash() + }) +} + +// SetHeight sets the "height" field. +func (u *AttachmentUpsertOne) SetHeight(v int) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.SetHeight(v) + }) +} + +// AddHeight adds v to the "height" field. +func (u *AttachmentUpsertOne) AddHeight(v int) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.AddHeight(v) + }) +} + +// UpdateHeight sets the "height" field to the value that was provided on create. 
+func (u *AttachmentUpsertOne) UpdateHeight() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateHeight() + }) +} + +// ClearHeight clears the value of the "height" field. +func (u *AttachmentUpsertOne) ClearHeight() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.ClearHeight() + }) +} + +// SetWidth sets the "width" field. +func (u *AttachmentUpsertOne) SetWidth(v int) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.SetWidth(v) + }) +} + +// AddWidth adds v to the "width" field. +func (u *AttachmentUpsertOne) AddWidth(v int) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.AddWidth(v) + }) +} + +// UpdateWidth sets the "width" field to the value that was provided on create. +func (u *AttachmentUpsertOne) UpdateWidth() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateWidth() + }) +} + +// ClearWidth clears the value of the "width" field. +func (u *AttachmentUpsertOne) ClearWidth() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.ClearWidth() + }) +} + +// SetFps sets the "fps" field. +func (u *AttachmentUpsertOne) SetFps(v int) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.SetFps(v) + }) +} + +// AddFps adds v to the "fps" field. +func (u *AttachmentUpsertOne) AddFps(v int) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.AddFps(v) + }) +} + +// UpdateFps sets the "fps" field to the value that was provided on create. +func (u *AttachmentUpsertOne) UpdateFps() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateFps() + }) +} + +// ClearFps clears the value of the "fps" field. +func (u *AttachmentUpsertOne) ClearFps() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.ClearFps() + }) +} + +// SetMimeType sets the "mimeType" field. 
+func (u *AttachmentUpsertOne) SetMimeType(v string) *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.SetMimeType(v) + }) +} + +// UpdateMimeType sets the "mimeType" field to the value that was provided on create. +func (u *AttachmentUpsertOne) UpdateMimeType() *AttachmentUpsertOne { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateMimeType() + }) +} + +// Exec executes the query. +func (u *AttachmentUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for AttachmentCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *AttachmentUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *AttachmentUpsertOne) ID(ctx context.Context) (id uuid.UUID, err error) { + if u.create.driver.Dialect() == dialect.MySQL { + // In case of "ON CONFLICT", there is no way to get back non-numeric ID + // fields from the database since MySQL does not support the RETURNING clause. + return id, errors.New("ent: AttachmentUpsertOne.ID is not supported by MySQL driver. Use AttachmentUpsertOne.Exec instead") + } + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *AttachmentUpsertOne) IDX(ctx context.Context) uuid.UUID { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// AttachmentCreateBulk is the builder for creating many Attachment entities in bulk. +type AttachmentCreateBulk struct { + config + err error + builders []*AttachmentCreate + conflict []sql.ConflictOption +} + +// Save creates the Attachment entities in the database. 
+func (acb *AttachmentCreateBulk) Save(ctx context.Context) ([]*Attachment, error) { + if acb.err != nil { + return nil, acb.err + } + specs := make([]*sqlgraph.CreateSpec, len(acb.builders)) + nodes := make([]*Attachment, len(acb.builders)) + mutators := make([]Mutator, len(acb.builders)) + for i := range acb.builders { + func(i int, root context.Context) { + builder := acb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*AttachmentMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, acb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = acb.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, acb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, acb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (acb *AttachmentCreateBulk) SaveX(ctx context.Context) []*Attachment { + v, err := acb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (acb *AttachmentCreateBulk) Exec(ctx context.Context) error { + _, err := acb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (acb *AttachmentCreateBulk) ExecX(ctx context.Context) { + if err := acb.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Attachment.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.AttachmentUpsert) { +// SetIsRemote(v+v). +// }). +// Exec(ctx) +func (acb *AttachmentCreateBulk) OnConflict(opts ...sql.ConflictOption) *AttachmentUpsertBulk { + acb.conflict = opts + return &AttachmentUpsertBulk{ + create: acb, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Attachment.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (acb *AttachmentCreateBulk) OnConflictColumns(columns ...string) *AttachmentUpsertBulk { + acb.conflict = append(acb.conflict, sql.ConflictColumns(columns...)) + return &AttachmentUpsertBulk{ + create: acb, + } +} + +// AttachmentUpsertBulk is the builder for "upsert"-ing +// a bulk of Attachment nodes. +type AttachmentUpsertBulk struct { + create *AttachmentCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.Attachment.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(attachment.FieldID) +// }), +// ). 
+// Exec(ctx) +func (u *AttachmentUpsertBulk) UpdateNewValues() *AttachmentUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.ID(); exists { + s.SetIgnore(attachment.FieldID) + } + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(attachment.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Attachment.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *AttachmentUpsertBulk) Ignore() *AttachmentUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *AttachmentUpsertBulk) DoNothing() *AttachmentUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the AttachmentCreateBulk.OnConflict +// documentation for more info. +func (u *AttachmentUpsertBulk) Update(set func(*AttachmentUpsert)) *AttachmentUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&AttachmentUpsert{UpdateSet: update}) + })) + return u +} + +// SetIsRemote sets the "isRemote" field. +func (u *AttachmentUpsertBulk) SetIsRemote(v bool) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.SetIsRemote(v) + }) +} + +// UpdateIsRemote sets the "isRemote" field to the value that was provided on create. +func (u *AttachmentUpsertBulk) UpdateIsRemote() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateIsRemote() + }) +} + +// SetURI sets the "uri" field. 
+func (u *AttachmentUpsertBulk) SetURI(v string) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.SetURI(v) + }) +} + +// UpdateURI sets the "uri" field to the value that was provided on create. +func (u *AttachmentUpsertBulk) UpdateURI() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateURI() + }) +} + +// SetExtensions sets the "extensions" field. +func (u *AttachmentUpsertBulk) SetExtensions(v lysand.Extensions) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.SetExtensions(v) + }) +} + +// UpdateExtensions sets the "extensions" field to the value that was provided on create. +func (u *AttachmentUpsertBulk) UpdateExtensions() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateExtensions() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *AttachmentUpsertBulk) SetUpdatedAt(v time.Time) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *AttachmentUpsertBulk) UpdateUpdatedAt() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetDescription sets the "description" field. +func (u *AttachmentUpsertBulk) SetDescription(v string) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *AttachmentUpsertBulk) UpdateDescription() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateDescription() + }) +} + +// SetSha256 sets the "sha256" field. +func (u *AttachmentUpsertBulk) SetSha256(v []byte) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.SetSha256(v) + }) +} + +// UpdateSha256 sets the "sha256" field to the value that was provided on create. 
+func (u *AttachmentUpsertBulk) UpdateSha256() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateSha256() + }) +} + +// SetSize sets the "size" field. +func (u *AttachmentUpsertBulk) SetSize(v int) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.SetSize(v) + }) +} + +// AddSize adds v to the "size" field. +func (u *AttachmentUpsertBulk) AddSize(v int) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.AddSize(v) + }) +} + +// UpdateSize sets the "size" field to the value that was provided on create. +func (u *AttachmentUpsertBulk) UpdateSize() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateSize() + }) +} + +// SetBlurhash sets the "blurhash" field. +func (u *AttachmentUpsertBulk) SetBlurhash(v string) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.SetBlurhash(v) + }) +} + +// UpdateBlurhash sets the "blurhash" field to the value that was provided on create. +func (u *AttachmentUpsertBulk) UpdateBlurhash() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateBlurhash() + }) +} + +// ClearBlurhash clears the value of the "blurhash" field. +func (u *AttachmentUpsertBulk) ClearBlurhash() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.ClearBlurhash() + }) +} + +// SetHeight sets the "height" field. +func (u *AttachmentUpsertBulk) SetHeight(v int) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.SetHeight(v) + }) +} + +// AddHeight adds v to the "height" field. +func (u *AttachmentUpsertBulk) AddHeight(v int) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.AddHeight(v) + }) +} + +// UpdateHeight sets the "height" field to the value that was provided on create. 
+func (u *AttachmentUpsertBulk) UpdateHeight() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateHeight() + }) +} + +// ClearHeight clears the value of the "height" field. +func (u *AttachmentUpsertBulk) ClearHeight() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.ClearHeight() + }) +} + +// SetWidth sets the "width" field. +func (u *AttachmentUpsertBulk) SetWidth(v int) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.SetWidth(v) + }) +} + +// AddWidth adds v to the "width" field. +func (u *AttachmentUpsertBulk) AddWidth(v int) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.AddWidth(v) + }) +} + +// UpdateWidth sets the "width" field to the value that was provided on create. +func (u *AttachmentUpsertBulk) UpdateWidth() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateWidth() + }) +} + +// ClearWidth clears the value of the "width" field. +func (u *AttachmentUpsertBulk) ClearWidth() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.ClearWidth() + }) +} + +// SetFps sets the "fps" field. +func (u *AttachmentUpsertBulk) SetFps(v int) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.SetFps(v) + }) +} + +// AddFps adds v to the "fps" field. +func (u *AttachmentUpsertBulk) AddFps(v int) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.AddFps(v) + }) +} + +// UpdateFps sets the "fps" field to the value that was provided on create. +func (u *AttachmentUpsertBulk) UpdateFps() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateFps() + }) +} + +// ClearFps clears the value of the "fps" field. +func (u *AttachmentUpsertBulk) ClearFps() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.ClearFps() + }) +} + +// SetMimeType sets the "mimeType" field. 
+func (u *AttachmentUpsertBulk) SetMimeType(v string) *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.SetMimeType(v) + }) +} + +// UpdateMimeType sets the "mimeType" field to the value that was provided on create. +func (u *AttachmentUpsertBulk) UpdateMimeType() *AttachmentUpsertBulk { + return u.Update(func(s *AttachmentUpsert) { + s.UpdateMimeType() + }) +} + +// Exec executes the query. +func (u *AttachmentUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the AttachmentCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for AttachmentCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *AttachmentUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/attachment_delete.go b/ent/attachment_delete.go new file mode 100644 index 0000000..e469e0a --- /dev/null +++ b/ent/attachment_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/lysand-org/versia-go/ent/attachment" + "github.com/lysand-org/versia-go/ent/predicate" +) + +// AttachmentDelete is the builder for deleting a Attachment entity. +type AttachmentDelete struct { + config + hooks []Hook + mutation *AttachmentMutation +} + +// Where appends a list predicates to the AttachmentDelete builder. +func (ad *AttachmentDelete) Where(ps ...predicate.Attachment) *AttachmentDelete { + ad.mutation.Where(ps...) + return ad +} + +// Exec executes the deletion query and returns how many vertices were deleted. 
+func (ad *AttachmentDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, ad.sqlExec, ad.mutation, ad.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (ad *AttachmentDelete) ExecX(ctx context.Context) int { + n, err := ad.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ad *AttachmentDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(attachment.Table, sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID)) + if ps := ad.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, ad.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ad.mutation.done = true + return affected, err +} + +// AttachmentDeleteOne is the builder for deleting a single Attachment entity. +type AttachmentDeleteOne struct { + ad *AttachmentDelete +} + +// Where appends a list predicates to the AttachmentDelete builder. +func (ado *AttachmentDeleteOne) Where(ps ...predicate.Attachment) *AttachmentDeleteOne { + ado.ad.mutation.Where(ps...) + return ado +} + +// Exec executes the deletion query. +func (ado *AttachmentDeleteOne) Exec(ctx context.Context) error { + n, err := ado.ad.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{attachment.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ado *AttachmentDeleteOne) ExecX(ctx context.Context) { + if err := ado.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/attachment_query.go b/ent/attachment_query.go new file mode 100644 index 0000000..344a3c2 --- /dev/null +++ b/ent/attachment_query.go @@ -0,0 +1,614 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/attachment" + "github.com/lysand-org/versia-go/ent/predicate" + "github.com/lysand-org/versia-go/ent/user" +) + +// AttachmentQuery is the builder for querying Attachment entities. +type AttachmentQuery struct { + config + ctx *QueryContext + order []attachment.OrderOption + inters []Interceptor + predicates []predicate.Attachment + withAuthor *UserQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the AttachmentQuery builder. +func (aq *AttachmentQuery) Where(ps ...predicate.Attachment) *AttachmentQuery { + aq.predicates = append(aq.predicates, ps...) + return aq +} + +// Limit the number of records to be returned by this query. +func (aq *AttachmentQuery) Limit(limit int) *AttachmentQuery { + aq.ctx.Limit = &limit + return aq +} + +// Offset to start from. +func (aq *AttachmentQuery) Offset(offset int) *AttachmentQuery { + aq.ctx.Offset = &offset + return aq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (aq *AttachmentQuery) Unique(unique bool) *AttachmentQuery { + aq.ctx.Unique = &unique + return aq +} + +// Order specifies how the records should be ordered. +func (aq *AttachmentQuery) Order(o ...attachment.OrderOption) *AttachmentQuery { + aq.order = append(aq.order, o...) + return aq +} + +// QueryAuthor chains the current query on the "author" edge. 
+func (aq *AttachmentQuery) QueryAuthor() *UserQuery { + query := (&UserClient{config: aq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := aq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(attachment.Table, attachment.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, attachment.AuthorTable, attachment.AuthorColumn), + ) + fromU = sqlgraph.SetNeighbors(aq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Attachment entity from the query. +// Returns a *NotFoundError when no Attachment was found. +func (aq *AttachmentQuery) First(ctx context.Context) (*Attachment, error) { + nodes, err := aq.Limit(1).All(setContextOp(ctx, aq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{attachment.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (aq *AttachmentQuery) FirstX(ctx context.Context) *Attachment { + node, err := aq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Attachment ID from the query. +// Returns a *NotFoundError when no Attachment ID was found. +func (aq *AttachmentQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = aq.Limit(1).IDs(setContextOp(ctx, aq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{attachment.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
+func (aq *AttachmentQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := aq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Attachment entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Attachment entity is found. +// Returns a *NotFoundError when no Attachment entities are found. +func (aq *AttachmentQuery) Only(ctx context.Context) (*Attachment, error) { + nodes, err := aq.Limit(2).All(setContextOp(ctx, aq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{attachment.Label} + default: + return nil, &NotSingularError{attachment.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (aq *AttachmentQuery) OnlyX(ctx context.Context) *Attachment { + node, err := aq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Attachment ID in the query. +// Returns a *NotSingularError when more than one Attachment ID is found. +// Returns a *NotFoundError when no entities are found. +func (aq *AttachmentQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = aq.Limit(2).IDs(setContextOp(ctx, aq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{attachment.Label} + default: + err = &NotSingularError{attachment.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (aq *AttachmentQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := aq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Attachments. 
+func (aq *AttachmentQuery) All(ctx context.Context) ([]*Attachment, error) { + ctx = setContextOp(ctx, aq.ctx, "All") + if err := aq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Attachment, *AttachmentQuery]() + return withInterceptors[[]*Attachment](ctx, aq, qr, aq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (aq *AttachmentQuery) AllX(ctx context.Context) []*Attachment { + nodes, err := aq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Attachment IDs. +func (aq *AttachmentQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if aq.ctx.Unique == nil && aq.path != nil { + aq.Unique(true) + } + ctx = setContextOp(ctx, aq.ctx, "IDs") + if err = aq.Select(attachment.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (aq *AttachmentQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := aq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (aq *AttachmentQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, aq.ctx, "Count") + if err := aq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, aq, querierCount[*AttachmentQuery](), aq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (aq *AttachmentQuery) CountX(ctx context.Context) int { + count, err := aq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (aq *AttachmentQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, aq.ctx, "Exist") + switch _, err := aq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (aq *AttachmentQuery) ExistX(ctx context.Context) bool { + exist, err := aq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the AttachmentQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (aq *AttachmentQuery) Clone() *AttachmentQuery { + if aq == nil { + return nil + } + return &AttachmentQuery{ + config: aq.config, + ctx: aq.ctx.Clone(), + order: append([]attachment.OrderOption{}, aq.order...), + inters: append([]Interceptor{}, aq.inters...), + predicates: append([]predicate.Attachment{}, aq.predicates...), + withAuthor: aq.withAuthor.Clone(), + // clone intermediate query. + sql: aq.sql.Clone(), + path: aq.path, + } +} + +// WithAuthor tells the query-builder to eager-load the nodes that are connected to +// the "author" edge. The optional arguments are used to configure the query builder of the edge. +func (aq *AttachmentQuery) WithAuthor(opts ...func(*UserQuery)) *AttachmentQuery { + query := (&UserClient{config: aq.config}).Query() + for _, opt := range opts { + opt(query) + } + aq.withAuthor = query + return aq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// IsRemote bool `json:"isRemote,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Attachment.Query(). +// GroupBy(attachment.FieldIsRemote). +// Aggregate(ent.Count()). 
+// Scan(ctx, &v) +func (aq *AttachmentQuery) GroupBy(field string, fields ...string) *AttachmentGroupBy { + aq.ctx.Fields = append([]string{field}, fields...) + grbuild := &AttachmentGroupBy{build: aq} + grbuild.flds = &aq.ctx.Fields + grbuild.label = attachment.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// IsRemote bool `json:"isRemote,omitempty"` +// } +// +// client.Attachment.Query(). +// Select(attachment.FieldIsRemote). +// Scan(ctx, &v) +func (aq *AttachmentQuery) Select(fields ...string) *AttachmentSelect { + aq.ctx.Fields = append(aq.ctx.Fields, fields...) + sbuild := &AttachmentSelect{AttachmentQuery: aq} + sbuild.label = attachment.Label + sbuild.flds, sbuild.scan = &aq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a AttachmentSelect configured with the given aggregations. +func (aq *AttachmentQuery) Aggregate(fns ...AggregateFunc) *AttachmentSelect { + return aq.Select().Aggregate(fns...) 
+} + +func (aq *AttachmentQuery) prepareQuery(ctx context.Context) error { + for _, inter := range aq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, aq); err != nil { + return err + } + } + } + for _, f := range aq.ctx.Fields { + if !attachment.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if aq.path != nil { + prev, err := aq.path(ctx) + if err != nil { + return err + } + aq.sql = prev + } + return nil +} + +func (aq *AttachmentQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Attachment, error) { + var ( + nodes = []*Attachment{} + withFKs = aq.withFKs + _spec = aq.querySpec() + loadedTypes = [1]bool{ + aq.withAuthor != nil, + } + ) + if aq.withAuthor != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, attachment.ForeignKeys...) 
+ } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Attachment).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Attachment{config: aq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, aq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := aq.withAuthor; query != nil { + if err := aq.loadAuthor(ctx, query, nodes, nil, + func(n *Attachment, e *User) { n.Edges.Author = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (aq *AttachmentQuery) loadAuthor(ctx context.Context, query *UserQuery, nodes []*Attachment, init func(*Attachment), assign func(*Attachment, *User)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Attachment) + for i := range nodes { + if nodes[i].attachment_author == nil { + continue + } + fk := *nodes[i].attachment_author + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "attachment_author" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (aq *AttachmentQuery) sqlCount(ctx context.Context) (int, error) { + _spec := aq.querySpec() + _spec.Node.Columns = aq.ctx.Fields + if len(aq.ctx.Fields) > 0 { + _spec.Unique = aq.ctx.Unique != nil && *aq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, aq.driver, _spec) +} + +func (aq *AttachmentQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(attachment.Table, 
attachment.Columns, sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID)) + _spec.From = aq.sql + if unique := aq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if aq.path != nil { + _spec.Unique = true + } + if fields := aq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, attachment.FieldID) + for i := range fields { + if fields[i] != attachment.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := aq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := aq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := aq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := aq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (aq *AttachmentQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(aq.driver.Dialect()) + t1 := builder.Table(attachment.Table) + columns := aq.ctx.Fields + if len(columns) == 0 { + columns = attachment.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if aq.sql != nil { + selector = aq.sql + selector.Select(selector.Columns(columns...)...) + } + if aq.ctx.Unique != nil && *aq.ctx.Unique { + selector.Distinct() + } + for _, p := range aq.predicates { + p(selector) + } + for _, p := range aq.order { + p(selector) + } + if offset := aq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := aq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// AttachmentGroupBy is the group-by builder for Attachment entities. 
+type AttachmentGroupBy struct { + selector + build *AttachmentQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (agb *AttachmentGroupBy) Aggregate(fns ...AggregateFunc) *AttachmentGroupBy { + agb.fns = append(agb.fns, fns...) + return agb +} + +// Scan applies the selector query and scans the result into the given value. +func (agb *AttachmentGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, agb.build.ctx, "GroupBy") + if err := agb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AttachmentQuery, *AttachmentGroupBy](ctx, agb.build, agb, agb.build.inters, v) +} + +func (agb *AttachmentGroupBy) sqlScan(ctx context.Context, root *AttachmentQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(agb.fns)) + for _, fn := range agb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*agb.flds)+len(agb.fns)) + for _, f := range *agb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*agb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := agb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// AttachmentSelect is the builder for selecting fields of Attachment entities. +type AttachmentSelect struct { + *AttachmentQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (as *AttachmentSelect) Aggregate(fns ...AggregateFunc) *AttachmentSelect { + as.fns = append(as.fns, fns...) + return as +} + +// Scan applies the selector query and scans the result into the given value. 
+func (as *AttachmentSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, as.ctx, "Select") + if err := as.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*AttachmentQuery, *AttachmentSelect](ctx, as.AttachmentQuery, as, as.inters, v) +} + +func (as *AttachmentSelect) sqlScan(ctx context.Context, root *AttachmentQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(as.fns)) + for _, fn := range as.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*as.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := as.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/ent/attachment_update.go b/ent/attachment_update.go new file mode 100644 index 0000000..f544283 --- /dev/null +++ b/ent/attachment_update.go @@ -0,0 +1,843 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/attachment" + "github.com/lysand-org/versia-go/ent/predicate" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// AttachmentUpdate is the builder for updating Attachment entities. +type AttachmentUpdate struct { + config + hooks []Hook + mutation *AttachmentMutation +} + +// Where appends a list predicates to the AttachmentUpdate builder. +func (au *AttachmentUpdate) Where(ps ...predicate.Attachment) *AttachmentUpdate { + au.mutation.Where(ps...) + return au +} + +// SetIsRemote sets the "isRemote" field. 
+func (au *AttachmentUpdate) SetIsRemote(b bool) *AttachmentUpdate { + au.mutation.SetIsRemote(b) + return au +} + +// SetNillableIsRemote sets the "isRemote" field if the given value is not nil. +func (au *AttachmentUpdate) SetNillableIsRemote(b *bool) *AttachmentUpdate { + if b != nil { + au.SetIsRemote(*b) + } + return au +} + +// SetURI sets the "uri" field. +func (au *AttachmentUpdate) SetURI(s string) *AttachmentUpdate { + au.mutation.SetURI(s) + return au +} + +// SetNillableURI sets the "uri" field if the given value is not nil. +func (au *AttachmentUpdate) SetNillableURI(s *string) *AttachmentUpdate { + if s != nil { + au.SetURI(*s) + } + return au +} + +// SetExtensions sets the "extensions" field. +func (au *AttachmentUpdate) SetExtensions(l lysand.Extensions) *AttachmentUpdate { + au.mutation.SetExtensions(l) + return au +} + +// SetUpdatedAt sets the "updated_at" field. +func (au *AttachmentUpdate) SetUpdatedAt(t time.Time) *AttachmentUpdate { + au.mutation.SetUpdatedAt(t) + return au +} + +// SetDescription sets the "description" field. +func (au *AttachmentUpdate) SetDescription(s string) *AttachmentUpdate { + au.mutation.SetDescription(s) + return au +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (au *AttachmentUpdate) SetNillableDescription(s *string) *AttachmentUpdate { + if s != nil { + au.SetDescription(*s) + } + return au +} + +// SetSha256 sets the "sha256" field. +func (au *AttachmentUpdate) SetSha256(b []byte) *AttachmentUpdate { + au.mutation.SetSha256(b) + return au +} + +// SetSize sets the "size" field. +func (au *AttachmentUpdate) SetSize(i int) *AttachmentUpdate { + au.mutation.ResetSize() + au.mutation.SetSize(i) + return au +} + +// SetNillableSize sets the "size" field if the given value is not nil. +func (au *AttachmentUpdate) SetNillableSize(i *int) *AttachmentUpdate { + if i != nil { + au.SetSize(*i) + } + return au +} + +// AddSize adds i to the "size" field. 
+func (au *AttachmentUpdate) AddSize(i int) *AttachmentUpdate { + au.mutation.AddSize(i) + return au +} + +// SetBlurhash sets the "blurhash" field. +func (au *AttachmentUpdate) SetBlurhash(s string) *AttachmentUpdate { + au.mutation.SetBlurhash(s) + return au +} + +// SetNillableBlurhash sets the "blurhash" field if the given value is not nil. +func (au *AttachmentUpdate) SetNillableBlurhash(s *string) *AttachmentUpdate { + if s != nil { + au.SetBlurhash(*s) + } + return au +} + +// ClearBlurhash clears the value of the "blurhash" field. +func (au *AttachmentUpdate) ClearBlurhash() *AttachmentUpdate { + au.mutation.ClearBlurhash() + return au +} + +// SetHeight sets the "height" field. +func (au *AttachmentUpdate) SetHeight(i int) *AttachmentUpdate { + au.mutation.ResetHeight() + au.mutation.SetHeight(i) + return au +} + +// SetNillableHeight sets the "height" field if the given value is not nil. +func (au *AttachmentUpdate) SetNillableHeight(i *int) *AttachmentUpdate { + if i != nil { + au.SetHeight(*i) + } + return au +} + +// AddHeight adds i to the "height" field. +func (au *AttachmentUpdate) AddHeight(i int) *AttachmentUpdate { + au.mutation.AddHeight(i) + return au +} + +// ClearHeight clears the value of the "height" field. +func (au *AttachmentUpdate) ClearHeight() *AttachmentUpdate { + au.mutation.ClearHeight() + return au +} + +// SetWidth sets the "width" field. +func (au *AttachmentUpdate) SetWidth(i int) *AttachmentUpdate { + au.mutation.ResetWidth() + au.mutation.SetWidth(i) + return au +} + +// SetNillableWidth sets the "width" field if the given value is not nil. +func (au *AttachmentUpdate) SetNillableWidth(i *int) *AttachmentUpdate { + if i != nil { + au.SetWidth(*i) + } + return au +} + +// AddWidth adds i to the "width" field. +func (au *AttachmentUpdate) AddWidth(i int) *AttachmentUpdate { + au.mutation.AddWidth(i) + return au +} + +// ClearWidth clears the value of the "width" field. 
+func (au *AttachmentUpdate) ClearWidth() *AttachmentUpdate { + au.mutation.ClearWidth() + return au +} + +// SetFps sets the "fps" field. +func (au *AttachmentUpdate) SetFps(i int) *AttachmentUpdate { + au.mutation.ResetFps() + au.mutation.SetFps(i) + return au +} + +// SetNillableFps sets the "fps" field if the given value is not nil. +func (au *AttachmentUpdate) SetNillableFps(i *int) *AttachmentUpdate { + if i != nil { + au.SetFps(*i) + } + return au +} + +// AddFps adds i to the "fps" field. +func (au *AttachmentUpdate) AddFps(i int) *AttachmentUpdate { + au.mutation.AddFps(i) + return au +} + +// ClearFps clears the value of the "fps" field. +func (au *AttachmentUpdate) ClearFps() *AttachmentUpdate { + au.mutation.ClearFps() + return au +} + +// SetMimeType sets the "mimeType" field. +func (au *AttachmentUpdate) SetMimeType(s string) *AttachmentUpdate { + au.mutation.SetMimeType(s) + return au +} + +// SetNillableMimeType sets the "mimeType" field if the given value is not nil. +func (au *AttachmentUpdate) SetNillableMimeType(s *string) *AttachmentUpdate { + if s != nil { + au.SetMimeType(*s) + } + return au +} + +// SetAuthorID sets the "author" edge to the User entity by ID. +func (au *AttachmentUpdate) SetAuthorID(id uuid.UUID) *AttachmentUpdate { + au.mutation.SetAuthorID(id) + return au +} + +// SetAuthor sets the "author" edge to the User entity. +func (au *AttachmentUpdate) SetAuthor(u *User) *AttachmentUpdate { + return au.SetAuthorID(u.ID) +} + +// Mutation returns the AttachmentMutation object of the builder. +func (au *AttachmentUpdate) Mutation() *AttachmentMutation { + return au.mutation +} + +// ClearAuthor clears the "author" edge to the User entity. +func (au *AttachmentUpdate) ClearAuthor() *AttachmentUpdate { + au.mutation.ClearAuthor() + return au +} + +// Save executes the query and returns the number of nodes affected by the update operation. 
+func (au *AttachmentUpdate) Save(ctx context.Context) (int, error) { + au.defaults() + return withHooks(ctx, au.sqlSave, au.mutation, au.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (au *AttachmentUpdate) SaveX(ctx context.Context) int { + affected, err := au.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (au *AttachmentUpdate) Exec(ctx context.Context) error { + _, err := au.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (au *AttachmentUpdate) ExecX(ctx context.Context) { + if err := au.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (au *AttachmentUpdate) defaults() { + if _, ok := au.mutation.UpdatedAt(); !ok { + v := attachment.UpdateDefaultUpdatedAt() + au.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (au *AttachmentUpdate) check() error { + if v, ok := au.mutation.URI(); ok { + if err := attachment.URIValidator(v); err != nil { + return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "Attachment.uri": %w`, err)} + } + } + if v, ok := au.mutation.Description(); ok { + if err := attachment.DescriptionValidator(v); err != nil { + return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "Attachment.description": %w`, err)} + } + } + if _, ok := au.mutation.AuthorID(); au.mutation.AuthorCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Attachment.author"`) + } + return nil +} + +func (au *AttachmentUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := au.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(attachment.Table, attachment.Columns, sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID)) + if ps := au.mutation.predicates; len(ps) > 0 { + _spec.Predicate = 
func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := au.mutation.IsRemote(); ok { + _spec.SetField(attachment.FieldIsRemote, field.TypeBool, value) + } + if value, ok := au.mutation.URI(); ok { + _spec.SetField(attachment.FieldURI, field.TypeString, value) + } + if value, ok := au.mutation.Extensions(); ok { + _spec.SetField(attachment.FieldExtensions, field.TypeJSON, value) + } + if value, ok := au.mutation.UpdatedAt(); ok { + _spec.SetField(attachment.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := au.mutation.Description(); ok { + _spec.SetField(attachment.FieldDescription, field.TypeString, value) + } + if value, ok := au.mutation.Sha256(); ok { + _spec.SetField(attachment.FieldSha256, field.TypeBytes, value) + } + if value, ok := au.mutation.Size(); ok { + _spec.SetField(attachment.FieldSize, field.TypeInt, value) + } + if value, ok := au.mutation.AddedSize(); ok { + _spec.AddField(attachment.FieldSize, field.TypeInt, value) + } + if value, ok := au.mutation.Blurhash(); ok { + _spec.SetField(attachment.FieldBlurhash, field.TypeString, value) + } + if au.mutation.BlurhashCleared() { + _spec.ClearField(attachment.FieldBlurhash, field.TypeString) + } + if value, ok := au.mutation.Height(); ok { + _spec.SetField(attachment.FieldHeight, field.TypeInt, value) + } + if value, ok := au.mutation.AddedHeight(); ok { + _spec.AddField(attachment.FieldHeight, field.TypeInt, value) + } + if au.mutation.HeightCleared() { + _spec.ClearField(attachment.FieldHeight, field.TypeInt) + } + if value, ok := au.mutation.Width(); ok { + _spec.SetField(attachment.FieldWidth, field.TypeInt, value) + } + if value, ok := au.mutation.AddedWidth(); ok { + _spec.AddField(attachment.FieldWidth, field.TypeInt, value) + } + if au.mutation.WidthCleared() { + _spec.ClearField(attachment.FieldWidth, field.TypeInt) + } + if value, ok := au.mutation.Fps(); ok { + _spec.SetField(attachment.FieldFps, field.TypeInt, value) + } + if value, ok 
:= au.mutation.AddedFps(); ok { + _spec.AddField(attachment.FieldFps, field.TypeInt, value) + } + if au.mutation.FpsCleared() { + _spec.ClearField(attachment.FieldFps, field.TypeInt) + } + if value, ok := au.mutation.MimeType(); ok { + _spec.SetField(attachment.FieldMimeType, field.TypeString, value) + } + if au.mutation.AuthorCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: attachment.AuthorTable, + Columns: []string{attachment.AuthorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := au.mutation.AuthorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: attachment.AuthorTable, + Columns: []string{attachment.AuthorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, au.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{attachment.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + au.mutation.done = true + return n, nil +} + +// AttachmentUpdateOne is the builder for updating a single Attachment entity. +type AttachmentUpdateOne struct { + config + fields []string + hooks []Hook + mutation *AttachmentMutation +} + +// SetIsRemote sets the "isRemote" field. +func (auo *AttachmentUpdateOne) SetIsRemote(b bool) *AttachmentUpdateOne { + auo.mutation.SetIsRemote(b) + return auo +} + +// SetNillableIsRemote sets the "isRemote" field if the given value is not nil. 
+func (auo *AttachmentUpdateOne) SetNillableIsRemote(b *bool) *AttachmentUpdateOne { + if b != nil { + auo.SetIsRemote(*b) + } + return auo +} + +// SetURI sets the "uri" field. +func (auo *AttachmentUpdateOne) SetURI(s string) *AttachmentUpdateOne { + auo.mutation.SetURI(s) + return auo +} + +// SetNillableURI sets the "uri" field if the given value is not nil. +func (auo *AttachmentUpdateOne) SetNillableURI(s *string) *AttachmentUpdateOne { + if s != nil { + auo.SetURI(*s) + } + return auo +} + +// SetExtensions sets the "extensions" field. +func (auo *AttachmentUpdateOne) SetExtensions(l lysand.Extensions) *AttachmentUpdateOne { + auo.mutation.SetExtensions(l) + return auo +} + +// SetUpdatedAt sets the "updated_at" field. +func (auo *AttachmentUpdateOne) SetUpdatedAt(t time.Time) *AttachmentUpdateOne { + auo.mutation.SetUpdatedAt(t) + return auo +} + +// SetDescription sets the "description" field. +func (auo *AttachmentUpdateOne) SetDescription(s string) *AttachmentUpdateOne { + auo.mutation.SetDescription(s) + return auo +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (auo *AttachmentUpdateOne) SetNillableDescription(s *string) *AttachmentUpdateOne { + if s != nil { + auo.SetDescription(*s) + } + return auo +} + +// SetSha256 sets the "sha256" field. +func (auo *AttachmentUpdateOne) SetSha256(b []byte) *AttachmentUpdateOne { + auo.mutation.SetSha256(b) + return auo +} + +// SetSize sets the "size" field. +func (auo *AttachmentUpdateOne) SetSize(i int) *AttachmentUpdateOne { + auo.mutation.ResetSize() + auo.mutation.SetSize(i) + return auo +} + +// SetNillableSize sets the "size" field if the given value is not nil. +func (auo *AttachmentUpdateOne) SetNillableSize(i *int) *AttachmentUpdateOne { + if i != nil { + auo.SetSize(*i) + } + return auo +} + +// AddSize adds i to the "size" field. 
+func (auo *AttachmentUpdateOne) AddSize(i int) *AttachmentUpdateOne { + auo.mutation.AddSize(i) + return auo +} + +// SetBlurhash sets the "blurhash" field. +func (auo *AttachmentUpdateOne) SetBlurhash(s string) *AttachmentUpdateOne { + auo.mutation.SetBlurhash(s) + return auo +} + +// SetNillableBlurhash sets the "blurhash" field if the given value is not nil. +func (auo *AttachmentUpdateOne) SetNillableBlurhash(s *string) *AttachmentUpdateOne { + if s != nil { + auo.SetBlurhash(*s) + } + return auo +} + +// ClearBlurhash clears the value of the "blurhash" field. +func (auo *AttachmentUpdateOne) ClearBlurhash() *AttachmentUpdateOne { + auo.mutation.ClearBlurhash() + return auo +} + +// SetHeight sets the "height" field. +func (auo *AttachmentUpdateOne) SetHeight(i int) *AttachmentUpdateOne { + auo.mutation.ResetHeight() + auo.mutation.SetHeight(i) + return auo +} + +// SetNillableHeight sets the "height" field if the given value is not nil. +func (auo *AttachmentUpdateOne) SetNillableHeight(i *int) *AttachmentUpdateOne { + if i != nil { + auo.SetHeight(*i) + } + return auo +} + +// AddHeight adds i to the "height" field. +func (auo *AttachmentUpdateOne) AddHeight(i int) *AttachmentUpdateOne { + auo.mutation.AddHeight(i) + return auo +} + +// ClearHeight clears the value of the "height" field. +func (auo *AttachmentUpdateOne) ClearHeight() *AttachmentUpdateOne { + auo.mutation.ClearHeight() + return auo +} + +// SetWidth sets the "width" field. +func (auo *AttachmentUpdateOne) SetWidth(i int) *AttachmentUpdateOne { + auo.mutation.ResetWidth() + auo.mutation.SetWidth(i) + return auo +} + +// SetNillableWidth sets the "width" field if the given value is not nil. +func (auo *AttachmentUpdateOne) SetNillableWidth(i *int) *AttachmentUpdateOne { + if i != nil { + auo.SetWidth(*i) + } + return auo +} + +// AddWidth adds i to the "width" field. 
+func (auo *AttachmentUpdateOne) AddWidth(i int) *AttachmentUpdateOne { + auo.mutation.AddWidth(i) + return auo +} + +// ClearWidth clears the value of the "width" field. +func (auo *AttachmentUpdateOne) ClearWidth() *AttachmentUpdateOne { + auo.mutation.ClearWidth() + return auo +} + +// SetFps sets the "fps" field. +func (auo *AttachmentUpdateOne) SetFps(i int) *AttachmentUpdateOne { + auo.mutation.ResetFps() + auo.mutation.SetFps(i) + return auo +} + +// SetNillableFps sets the "fps" field if the given value is not nil. +func (auo *AttachmentUpdateOne) SetNillableFps(i *int) *AttachmentUpdateOne { + if i != nil { + auo.SetFps(*i) + } + return auo +} + +// AddFps adds i to the "fps" field. +func (auo *AttachmentUpdateOne) AddFps(i int) *AttachmentUpdateOne { + auo.mutation.AddFps(i) + return auo +} + +// ClearFps clears the value of the "fps" field. +func (auo *AttachmentUpdateOne) ClearFps() *AttachmentUpdateOne { + auo.mutation.ClearFps() + return auo +} + +// SetMimeType sets the "mimeType" field. +func (auo *AttachmentUpdateOne) SetMimeType(s string) *AttachmentUpdateOne { + auo.mutation.SetMimeType(s) + return auo +} + +// SetNillableMimeType sets the "mimeType" field if the given value is not nil. +func (auo *AttachmentUpdateOne) SetNillableMimeType(s *string) *AttachmentUpdateOne { + if s != nil { + auo.SetMimeType(*s) + } + return auo +} + +// SetAuthorID sets the "author" edge to the User entity by ID. +func (auo *AttachmentUpdateOne) SetAuthorID(id uuid.UUID) *AttachmentUpdateOne { + auo.mutation.SetAuthorID(id) + return auo +} + +// SetAuthor sets the "author" edge to the User entity. +func (auo *AttachmentUpdateOne) SetAuthor(u *User) *AttachmentUpdateOne { + return auo.SetAuthorID(u.ID) +} + +// Mutation returns the AttachmentMutation object of the builder. +func (auo *AttachmentUpdateOne) Mutation() *AttachmentMutation { + return auo.mutation +} + +// ClearAuthor clears the "author" edge to the User entity. 
+func (auo *AttachmentUpdateOne) ClearAuthor() *AttachmentUpdateOne { + auo.mutation.ClearAuthor() + return auo +} + +// Where appends a list predicates to the AttachmentUpdate builder. +func (auo *AttachmentUpdateOne) Where(ps ...predicate.Attachment) *AttachmentUpdateOne { + auo.mutation.Where(ps...) + return auo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (auo *AttachmentUpdateOne) Select(field string, fields ...string) *AttachmentUpdateOne { + auo.fields = append([]string{field}, fields...) + return auo +} + +// Save executes the query and returns the updated Attachment entity. +func (auo *AttachmentUpdateOne) Save(ctx context.Context) (*Attachment, error) { + auo.defaults() + return withHooks(ctx, auo.sqlSave, auo.mutation, auo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (auo *AttachmentUpdateOne) SaveX(ctx context.Context) *Attachment { + node, err := auo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (auo *AttachmentUpdateOne) Exec(ctx context.Context) error { + _, err := auo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (auo *AttachmentUpdateOne) ExecX(ctx context.Context) { + if err := auo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (auo *AttachmentUpdateOne) defaults() { + if _, ok := auo.mutation.UpdatedAt(); !ok { + v := attachment.UpdateDefaultUpdatedAt() + auo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (auo *AttachmentUpdateOne) check() error { + if v, ok := auo.mutation.URI(); ok { + if err := attachment.URIValidator(v); err != nil { + return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "Attachment.uri": %w`, err)} + } + } + if v, ok := auo.mutation.Description(); ok { + if err := attachment.DescriptionValidator(v); err != nil { + return &ValidationError{Name: "description", err: fmt.Errorf(`ent: validator failed for field "Attachment.description": %w`, err)} + } + } + if _, ok := auo.mutation.AuthorID(); auo.mutation.AuthorCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Attachment.author"`) + } + return nil +} + +func (auo *AttachmentUpdateOne) sqlSave(ctx context.Context) (_node *Attachment, err error) { + if err := auo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(attachment.Table, attachment.Columns, sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID)) + id, ok := auo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Attachment.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := auo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, attachment.FieldID) + for _, f := range fields { + if !attachment.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != attachment.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := auo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := auo.mutation.IsRemote(); ok { + _spec.SetField(attachment.FieldIsRemote, field.TypeBool, value) + } + if value, ok := auo.mutation.URI(); ok { + _spec.SetField(attachment.FieldURI, field.TypeString, value) + } + if value, ok := 
auo.mutation.Extensions(); ok { + _spec.SetField(attachment.FieldExtensions, field.TypeJSON, value) + } + if value, ok := auo.mutation.UpdatedAt(); ok { + _spec.SetField(attachment.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := auo.mutation.Description(); ok { + _spec.SetField(attachment.FieldDescription, field.TypeString, value) + } + if value, ok := auo.mutation.Sha256(); ok { + _spec.SetField(attachment.FieldSha256, field.TypeBytes, value) + } + if value, ok := auo.mutation.Size(); ok { + _spec.SetField(attachment.FieldSize, field.TypeInt, value) + } + if value, ok := auo.mutation.AddedSize(); ok { + _spec.AddField(attachment.FieldSize, field.TypeInt, value) + } + if value, ok := auo.mutation.Blurhash(); ok { + _spec.SetField(attachment.FieldBlurhash, field.TypeString, value) + } + if auo.mutation.BlurhashCleared() { + _spec.ClearField(attachment.FieldBlurhash, field.TypeString) + } + if value, ok := auo.mutation.Height(); ok { + _spec.SetField(attachment.FieldHeight, field.TypeInt, value) + } + if value, ok := auo.mutation.AddedHeight(); ok { + _spec.AddField(attachment.FieldHeight, field.TypeInt, value) + } + if auo.mutation.HeightCleared() { + _spec.ClearField(attachment.FieldHeight, field.TypeInt) + } + if value, ok := auo.mutation.Width(); ok { + _spec.SetField(attachment.FieldWidth, field.TypeInt, value) + } + if value, ok := auo.mutation.AddedWidth(); ok { + _spec.AddField(attachment.FieldWidth, field.TypeInt, value) + } + if auo.mutation.WidthCleared() { + _spec.ClearField(attachment.FieldWidth, field.TypeInt) + } + if value, ok := auo.mutation.Fps(); ok { + _spec.SetField(attachment.FieldFps, field.TypeInt, value) + } + if value, ok := auo.mutation.AddedFps(); ok { + _spec.AddField(attachment.FieldFps, field.TypeInt, value) + } + if auo.mutation.FpsCleared() { + _spec.ClearField(attachment.FieldFps, field.TypeInt) + } + if value, ok := auo.mutation.MimeType(); ok { + _spec.SetField(attachment.FieldMimeType, field.TypeString, value) + } + 
if auo.mutation.AuthorCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: attachment.AuthorTable, + Columns: []string{attachment.AuthorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := auo.mutation.AuthorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: attachment.AuthorTable, + Columns: []string{attachment.AuthorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Attachment{config: auo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, auo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{attachment.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + auo.mutation.done = true + return _node, nil +} diff --git a/ent/client.go b/ent/client.go new file mode 100644 index 0000000..7e546e7 --- /dev/null +++ b/ent/client.go @@ -0,0 +1,1247 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "log" + "reflect" + + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/migrate" + + "entgo.io/ent" + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/lysand-org/versia-go/ent/attachment" + "github.com/lysand-org/versia-go/ent/follow" + "github.com/lysand-org/versia-go/ent/image" + "github.com/lysand-org/versia-go/ent/note" + "github.com/lysand-org/versia-go/ent/servermetadata" + "github.com/lysand-org/versia-go/ent/user" +) + +// Client is the client that holds all ent builders. +type Client struct { + config + // Schema is the client for creating, migrating and dropping schema. + Schema *migrate.Schema + // Attachment is the client for interacting with the Attachment builders. + Attachment *AttachmentClient + // Follow is the client for interacting with the Follow builders. + Follow *FollowClient + // Image is the client for interacting with the Image builders. + Image *ImageClient + // Note is the client for interacting with the Note builders. + Note *NoteClient + // ServerMetadata is the client for interacting with the ServerMetadata builders. + ServerMetadata *ServerMetadataClient + // User is the client for interacting with the User builders. + User *UserClient +} + +// NewClient creates a new client configured with the given options. +func NewClient(opts ...Option) *Client { + client := &Client{config: newConfig(opts...)} + client.init() + return client +} + +func (c *Client) init() { + c.Schema = migrate.NewSchema(c.driver) + c.Attachment = NewAttachmentClient(c.config) + c.Follow = NewFollowClient(c.config) + c.Image = NewImageClient(c.config) + c.Note = NewNoteClient(c.config) + c.ServerMetadata = NewServerMetadataClient(c.config) + c.User = NewUserClient(c.config) +} + +type ( + // config is the configuration for the client and its builder. + config struct { + // driver used for executing database requests. 
+ driver dialect.Driver + // debug enable a debug logging. + debug bool + // log used for logging on debug mode. + log func(...any) + // hooks to execute on mutations. + hooks *hooks + // interceptors to execute on queries. + inters *inters + } + // Option function to configure the client. + Option func(*config) +) + +// newConfig creates a new config for the client. +func newConfig(opts ...Option) config { + cfg := config{log: log.Println, hooks: &hooks{}, inters: &inters{}} + cfg.options(opts...) + return cfg +} + +// options applies the options on the config object. +func (c *config) options(opts ...Option) { + for _, opt := range opts { + opt(c) + } + if c.debug { + c.driver = dialect.Debug(c.driver, c.log) + } +} + +// Debug enables debug logging on the ent.Driver. +func Debug() Option { + return func(c *config) { + c.debug = true + } +} + +// Log sets the logging function for debug mode. +func Log(fn func(...any)) Option { + return func(c *config) { + c.log = fn + } +} + +// Driver configures the client driver. +func Driver(driver dialect.Driver) Option { + return func(c *config) { + c.driver = driver + } +} + +// Open opens a database/sql.DB specified by the driver name and +// the data source name, and returns a new client attached to it. +// Optional parameters can be added for configuring the client. +func Open(driverName, dataSourceName string, options ...Option) (*Client, error) { + switch driverName { + case dialect.MySQL, dialect.Postgres, dialect.SQLite: + drv, err := sql.Open(driverName, dataSourceName) + if err != nil { + return nil, err + } + return NewClient(append(options, Driver(drv))...), nil + default: + return nil, fmt.Errorf("unsupported driver: %q", driverName) + } +} + +// ErrTxStarted is returned when trying to start a new transaction from a transactional client. +var ErrTxStarted = errors.New("ent: cannot start a transaction within a transaction") + +// Tx returns a new transactional client. 
The provided context
// is used until the transaction is committed or rolled back.
func (c *Client) Tx(ctx context.Context) (*Tx, error) {
	if _, ok := c.driver.(*txDriver); ok {
		return nil, ErrTxStarted
	}
	tx, err := newTx(ctx, c.driver)
	if err != nil {
		return nil, fmt.Errorf("ent: starting a transaction: %w", err)
	}
	cfg := c.config
	cfg.driver = tx
	return &Tx{
		ctx:            ctx,
		config:         cfg,
		Attachment:     NewAttachmentClient(cfg),
		Follow:         NewFollowClient(cfg),
		Image:          NewImageClient(cfg),
		Note:           NewNoteClient(cfg),
		ServerMetadata: NewServerMetadataClient(cfg),
		User:           NewUserClient(cfg),
	}, nil
}

// BeginTx returns a transactional client with specified options.
func (c *Client) BeginTx(ctx context.Context, opts *sql.TxOptions) (*Tx, error) {
	if _, ok := c.driver.(*txDriver); ok {
		// Return the exported sentinel (same condition as Tx above) so callers
		// can detect nested transactions with errors.Is(err, ErrTxStarted)
		// regardless of which entry point they used.
		return nil, ErrTxStarted
	}
	// Not every dialect.Driver supports options-based transactions; fail with a
	// descriptive error instead of panicking on a bare type assertion.
	opener, ok := c.driver.(interface {
		BeginTx(context.Context, *sql.TxOptions) (dialect.Tx, error)
	})
	if !ok {
		return nil, errors.New("ent: driver does not support BeginTx")
	}
	tx, err := opener.BeginTx(ctx, opts)
	if err != nil {
		return nil, fmt.Errorf("ent: starting a transaction: %w", err)
	}
	cfg := c.config
	cfg.driver = &txDriver{tx: tx, drv: c.driver}
	return &Tx{
		ctx:            ctx,
		config:         cfg,
		Attachment:     NewAttachmentClient(cfg),
		Follow:         NewFollowClient(cfg),
		Image:          NewImageClient(cfg),
		Note:           NewNoteClient(cfg),
		ServerMetadata: NewServerMetadataClient(cfg),
		User:           NewUserClient(cfg),
	}, nil
}

// Debug returns a new debug-client. It's used to get verbose logging on specific operations.
//
//	client.Debug().
//		Attachment.
//		Query().
//		Count(ctx)
func (c *Client) Debug() *Client {
	if c.debug {
		return c
	}
	cfg := c.config
	cfg.driver = dialect.Debug(c.driver, c.log)
	client := &Client{config: cfg}
	client.init()
	return client
}

// Close closes the database connection and prevents new queries from starting.
+func (c *Client) Close() error { + return c.driver.Close() +} + +// Use adds the mutation hooks to all the entity clients. +// In order to add hooks to a specific client, call: `client.Node.Use(...)`. +func (c *Client) Use(hooks ...Hook) { + for _, n := range []interface{ Use(...Hook) }{ + c.Attachment, c.Follow, c.Image, c.Note, c.ServerMetadata, c.User, + } { + n.Use(hooks...) + } +} + +// Intercept adds the query interceptors to all the entity clients. +// In order to add interceptors to a specific client, call: `client.Node.Intercept(...)`. +func (c *Client) Intercept(interceptors ...Interceptor) { + for _, n := range []interface{ Intercept(...Interceptor) }{ + c.Attachment, c.Follow, c.Image, c.Note, c.ServerMetadata, c.User, + } { + n.Intercept(interceptors...) + } +} + +// Mutate implements the ent.Mutator interface. +func (c *Client) Mutate(ctx context.Context, m Mutation) (Value, error) { + switch m := m.(type) { + case *AttachmentMutation: + return c.Attachment.mutate(ctx, m) + case *FollowMutation: + return c.Follow.mutate(ctx, m) + case *ImageMutation: + return c.Image.mutate(ctx, m) + case *NoteMutation: + return c.Note.mutate(ctx, m) + case *ServerMetadataMutation: + return c.ServerMetadata.mutate(ctx, m) + case *UserMutation: + return c.User.mutate(ctx, m) + default: + return nil, fmt.Errorf("ent: unknown mutation type %T", m) + } +} + +// AttachmentClient is a client for the Attachment schema. +type AttachmentClient struct { + config +} + +// NewAttachmentClient returns a client for the Attachment from the given config. +func NewAttachmentClient(c config) *AttachmentClient { + return &AttachmentClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `attachment.Hooks(f(g(h())))`. +func (c *AttachmentClient) Use(hooks ...Hook) { + c.hooks.Attachment = append(c.hooks.Attachment, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. 
+// A call to `Intercept(f, g, h)` equals to `attachment.Intercept(f(g(h())))`. +func (c *AttachmentClient) Intercept(interceptors ...Interceptor) { + c.inters.Attachment = append(c.inters.Attachment, interceptors...) +} + +// Create returns a builder for creating a Attachment entity. +func (c *AttachmentClient) Create() *AttachmentCreate { + mutation := newAttachmentMutation(c.config, OpCreate) + return &AttachmentCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Attachment entities. +func (c *AttachmentClient) CreateBulk(builders ...*AttachmentCreate) *AttachmentCreateBulk { + return &AttachmentCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *AttachmentClient) MapCreateBulk(slice any, setFunc func(*AttachmentCreate, int)) *AttachmentCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &AttachmentCreateBulk{err: fmt.Errorf("calling to AttachmentClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*AttachmentCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &AttachmentCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Attachment. +func (c *AttachmentClient) Update() *AttachmentUpdate { + mutation := newAttachmentMutation(c.config, OpUpdate) + return &AttachmentUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. 
+func (c *AttachmentClient) UpdateOne(a *Attachment) *AttachmentUpdateOne { + mutation := newAttachmentMutation(c.config, OpUpdateOne, withAttachment(a)) + return &AttachmentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *AttachmentClient) UpdateOneID(id uuid.UUID) *AttachmentUpdateOne { + mutation := newAttachmentMutation(c.config, OpUpdateOne, withAttachmentID(id)) + return &AttachmentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Attachment. +func (c *AttachmentClient) Delete() *AttachmentDelete { + mutation := newAttachmentMutation(c.config, OpDelete) + return &AttachmentDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *AttachmentClient) DeleteOne(a *Attachment) *AttachmentDeleteOne { + return c.DeleteOneID(a.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *AttachmentClient) DeleteOneID(id uuid.UUID) *AttachmentDeleteOne { + builder := c.Delete().Where(attachment.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &AttachmentDeleteOne{builder} +} + +// Query returns a query builder for Attachment. +func (c *AttachmentClient) Query() *AttachmentQuery { + return &AttachmentQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeAttachment}, + inters: c.Interceptors(), + } +} + +// Get returns a Attachment entity by its id. +func (c *AttachmentClient) Get(ctx context.Context, id uuid.UUID) (*Attachment, error) { + return c.Query().Where(attachment.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *AttachmentClient) GetX(ctx context.Context, id uuid.UUID) *Attachment { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryAuthor queries the author edge of a Attachment. 
+func (c *AttachmentClient) QueryAuthor(a *Attachment) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := a.ID + step := sqlgraph.NewStep( + sqlgraph.From(attachment.Table, attachment.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, attachment.AuthorTable, attachment.AuthorColumn), + ) + fromV = sqlgraph.Neighbors(a.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *AttachmentClient) Hooks() []Hook { + return c.hooks.Attachment +} + +// Interceptors returns the client interceptors. +func (c *AttachmentClient) Interceptors() []Interceptor { + return c.inters.Attachment +} + +func (c *AttachmentClient) mutate(ctx context.Context, m *AttachmentMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&AttachmentCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&AttachmentUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&AttachmentUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&AttachmentDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Attachment mutation op: %q", m.Op()) + } +} + +// FollowClient is a client for the Follow schema. +type FollowClient struct { + config +} + +// NewFollowClient returns a client for the Follow from the given config. +func NewFollowClient(c config) *FollowClient { + return &FollowClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `follow.Hooks(f(g(h())))`. +func (c *FollowClient) Use(hooks ...Hook) { + c.hooks.Follow = append(c.hooks.Follow, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. 
+// A call to `Intercept(f, g, h)` equals to `follow.Intercept(f(g(h())))`. +func (c *FollowClient) Intercept(interceptors ...Interceptor) { + c.inters.Follow = append(c.inters.Follow, interceptors...) +} + +// Create returns a builder for creating a Follow entity. +func (c *FollowClient) Create() *FollowCreate { + mutation := newFollowMutation(c.config, OpCreate) + return &FollowCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Follow entities. +func (c *FollowClient) CreateBulk(builders ...*FollowCreate) *FollowCreateBulk { + return &FollowCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *FollowClient) MapCreateBulk(slice any, setFunc func(*FollowCreate, int)) *FollowCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &FollowCreateBulk{err: fmt.Errorf("calling to FollowClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*FollowCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &FollowCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Follow. +func (c *FollowClient) Update() *FollowUpdate { + mutation := newFollowMutation(c.config, OpUpdate) + return &FollowUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *FollowClient) UpdateOne(f *Follow) *FollowUpdateOne { + mutation := newFollowMutation(c.config, OpUpdateOne, withFollow(f)) + return &FollowUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. 
+func (c *FollowClient) UpdateOneID(id uuid.UUID) *FollowUpdateOne { + mutation := newFollowMutation(c.config, OpUpdateOne, withFollowID(id)) + return &FollowUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Follow. +func (c *FollowClient) Delete() *FollowDelete { + mutation := newFollowMutation(c.config, OpDelete) + return &FollowDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *FollowClient) DeleteOne(f *Follow) *FollowDeleteOne { + return c.DeleteOneID(f.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *FollowClient) DeleteOneID(id uuid.UUID) *FollowDeleteOne { + builder := c.Delete().Where(follow.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &FollowDeleteOne{builder} +} + +// Query returns a query builder for Follow. +func (c *FollowClient) Query() *FollowQuery { + return &FollowQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeFollow}, + inters: c.Interceptors(), + } +} + +// Get returns a Follow entity by its id. +func (c *FollowClient) Get(ctx context.Context, id uuid.UUID) (*Follow, error) { + return c.Query().Where(follow.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *FollowClient) GetX(ctx context.Context, id uuid.UUID) *Follow { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryFollower queries the follower edge of a Follow. 
+func (c *FollowClient) QueryFollower(f *Follow) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := f.ID + step := sqlgraph.NewStep( + sqlgraph.From(follow.Table, follow.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, follow.FollowerTable, follow.FollowerColumn), + ) + fromV = sqlgraph.Neighbors(f.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryFollowee queries the followee edge of a Follow. +func (c *FollowClient) QueryFollowee(f *Follow) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := f.ID + step := sqlgraph.NewStep( + sqlgraph.From(follow.Table, follow.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, follow.FolloweeTable, follow.FolloweeColumn), + ) + fromV = sqlgraph.Neighbors(f.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *FollowClient) Hooks() []Hook { + return c.hooks.Follow +} + +// Interceptors returns the client interceptors. +func (c *FollowClient) Interceptors() []Interceptor { + return c.inters.Follow +} + +func (c *FollowClient) mutate(ctx context.Context, m *FollowMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&FollowCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&FollowUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&FollowUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&FollowDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Follow mutation op: %q", m.Op()) + } +} + +// ImageClient is a client for the Image schema. 
+type ImageClient struct { + config +} + +// NewImageClient returns a client for the Image from the given config. +func NewImageClient(c config) *ImageClient { + return &ImageClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `image.Hooks(f(g(h())))`. +func (c *ImageClient) Use(hooks ...Hook) { + c.hooks.Image = append(c.hooks.Image, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `image.Intercept(f(g(h())))`. +func (c *ImageClient) Intercept(interceptors ...Interceptor) { + c.inters.Image = append(c.inters.Image, interceptors...) +} + +// Create returns a builder for creating a Image entity. +func (c *ImageClient) Create() *ImageCreate { + mutation := newImageMutation(c.config, OpCreate) + return &ImageCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Image entities. +func (c *ImageClient) CreateBulk(builders ...*ImageCreate) *ImageCreateBulk { + return &ImageCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *ImageClient) MapCreateBulk(slice any, setFunc func(*ImageCreate, int)) *ImageCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ImageCreateBulk{err: fmt.Errorf("calling to ImageClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ImageCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ImageCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Image. 
+func (c *ImageClient) Update() *ImageUpdate { + mutation := newImageMutation(c.config, OpUpdate) + return &ImageUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *ImageClient) UpdateOne(i *Image) *ImageUpdateOne { + mutation := newImageMutation(c.config, OpUpdateOne, withImage(i)) + return &ImageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *ImageClient) UpdateOneID(id int) *ImageUpdateOne { + mutation := newImageMutation(c.config, OpUpdateOne, withImageID(id)) + return &ImageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Image. +func (c *ImageClient) Delete() *ImageDelete { + mutation := newImageMutation(c.config, OpDelete) + return &ImageDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *ImageClient) DeleteOne(i *Image) *ImageDeleteOne { + return c.DeleteOneID(i.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *ImageClient) DeleteOneID(id int) *ImageDeleteOne { + builder := c.Delete().Where(image.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &ImageDeleteOne{builder} +} + +// Query returns a query builder for Image. +func (c *ImageClient) Query() *ImageQuery { + return &ImageQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeImage}, + inters: c.Interceptors(), + } +} + +// Get returns a Image entity by its id. +func (c *ImageClient) Get(ctx context.Context, id int) (*Image, error) { + return c.Query().Where(image.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. 
+func (c *ImageClient) GetX(ctx context.Context, id int) *Image { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// Hooks returns the client hooks. +func (c *ImageClient) Hooks() []Hook { + return c.hooks.Image +} + +// Interceptors returns the client interceptors. +func (c *ImageClient) Interceptors() []Interceptor { + return c.inters.Image +} + +func (c *ImageClient) mutate(ctx context.Context, m *ImageMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ImageCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ImageUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ImageUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ImageDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Image mutation op: %q", m.Op()) + } +} + +// NoteClient is a client for the Note schema. +type NoteClient struct { + config +} + +// NewNoteClient returns a client for the Note from the given config. +func NewNoteClient(c config) *NoteClient { + return &NoteClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `note.Hooks(f(g(h())))`. +func (c *NoteClient) Use(hooks ...Hook) { + c.hooks.Note = append(c.hooks.Note, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `note.Intercept(f(g(h())))`. +func (c *NoteClient) Intercept(interceptors ...Interceptor) { + c.inters.Note = append(c.inters.Note, interceptors...) +} + +// Create returns a builder for creating a Note entity. 
+func (c *NoteClient) Create() *NoteCreate { + mutation := newNoteMutation(c.config, OpCreate) + return &NoteCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of Note entities. +func (c *NoteClient) CreateBulk(builders ...*NoteCreate) *NoteCreateBulk { + return &NoteCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *NoteClient) MapCreateBulk(slice any, setFunc func(*NoteCreate, int)) *NoteCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &NoteCreateBulk{err: fmt.Errorf("calling to NoteClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*NoteCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &NoteCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for Note. +func (c *NoteClient) Update() *NoteUpdate { + mutation := newNoteMutation(c.config, OpUpdate) + return &NoteUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *NoteClient) UpdateOne(n *Note) *NoteUpdateOne { + mutation := newNoteMutation(c.config, OpUpdateOne, withNote(n)) + return &NoteUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *NoteClient) UpdateOneID(id uuid.UUID) *NoteUpdateOne { + mutation := newNoteMutation(c.config, OpUpdateOne, withNoteID(id)) + return &NoteUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for Note. 
+func (c *NoteClient) Delete() *NoteDelete { + mutation := newNoteMutation(c.config, OpDelete) + return &NoteDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *NoteClient) DeleteOne(n *Note) *NoteDeleteOne { + return c.DeleteOneID(n.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *NoteClient) DeleteOneID(id uuid.UUID) *NoteDeleteOne { + builder := c.Delete().Where(note.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &NoteDeleteOne{builder} +} + +// Query returns a query builder for Note. +func (c *NoteClient) Query() *NoteQuery { + return &NoteQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeNote}, + inters: c.Interceptors(), + } +} + +// Get returns a Note entity by its id. +func (c *NoteClient) Get(ctx context.Context, id uuid.UUID) (*Note, error) { + return c.Query().Where(note.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *NoteClient) GetX(ctx context.Context, id uuid.UUID) *Note { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryAuthor queries the author edge of a Note. +func (c *NoteClient) QueryAuthor(n *Note) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := n.ID + step := sqlgraph.NewStep( + sqlgraph.From(note.Table, note.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, note.AuthorTable, note.AuthorColumn), + ) + fromV = sqlgraph.Neighbors(n.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryMentions queries the mentions edge of a Note. 
+func (c *NoteClient) QueryMentions(n *Note) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := n.ID + step := sqlgraph.NewStep( + sqlgraph.From(note.Table, note.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, note.MentionsTable, note.MentionsPrimaryKey...), + ) + fromV = sqlgraph.Neighbors(n.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAttachments queries the attachments edge of a Note. +func (c *NoteClient) QueryAttachments(n *Note) *AttachmentQuery { + query := (&AttachmentClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := n.ID + step := sqlgraph.NewStep( + sqlgraph.From(note.Table, note.FieldID, id), + sqlgraph.To(attachment.Table, attachment.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, note.AttachmentsTable, note.AttachmentsColumn), + ) + fromV = sqlgraph.Neighbors(n.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *NoteClient) Hooks() []Hook { + return c.hooks.Note +} + +// Interceptors returns the client interceptors. +func (c *NoteClient) Interceptors() []Interceptor { + return c.inters.Note +} + +func (c *NoteClient) mutate(ctx context.Context, m *NoteMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&NoteCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&NoteUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&NoteUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&NoteDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown Note mutation op: %q", m.Op()) + } +} + +// ServerMetadataClient is a client for the ServerMetadata schema. 
+type ServerMetadataClient struct { + config +} + +// NewServerMetadataClient returns a client for the ServerMetadata from the given config. +func NewServerMetadataClient(c config) *ServerMetadataClient { + return &ServerMetadataClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `servermetadata.Hooks(f(g(h())))`. +func (c *ServerMetadataClient) Use(hooks ...Hook) { + c.hooks.ServerMetadata = append(c.hooks.ServerMetadata, hooks...) +} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `servermetadata.Intercept(f(g(h())))`. +func (c *ServerMetadataClient) Intercept(interceptors ...Interceptor) { + c.inters.ServerMetadata = append(c.inters.ServerMetadata, interceptors...) +} + +// Create returns a builder for creating a ServerMetadata entity. +func (c *ServerMetadataClient) Create() *ServerMetadataCreate { + mutation := newServerMetadataMutation(c.config, OpCreate) + return &ServerMetadataCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of ServerMetadata entities. +func (c *ServerMetadataClient) CreateBulk(builders ...*ServerMetadataCreate) *ServerMetadataCreateBulk { + return &ServerMetadataCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. 
+func (c *ServerMetadataClient) MapCreateBulk(slice any, setFunc func(*ServerMetadataCreate, int)) *ServerMetadataCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &ServerMetadataCreateBulk{err: fmt.Errorf("calling to ServerMetadataClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*ServerMetadataCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &ServerMetadataCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for ServerMetadata. +func (c *ServerMetadataClient) Update() *ServerMetadataUpdate { + mutation := newServerMetadataMutation(c.config, OpUpdate) + return &ServerMetadataUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *ServerMetadataClient) UpdateOne(sm *ServerMetadata) *ServerMetadataUpdateOne { + mutation := newServerMetadataMutation(c.config, OpUpdateOne, withServerMetadata(sm)) + return &ServerMetadataUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. +func (c *ServerMetadataClient) UpdateOneID(id uuid.UUID) *ServerMetadataUpdateOne { + mutation := newServerMetadataMutation(c.config, OpUpdateOne, withServerMetadataID(id)) + return &ServerMetadataUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for ServerMetadata. +func (c *ServerMetadataClient) Delete() *ServerMetadataDelete { + mutation := newServerMetadataMutation(c.config, OpDelete) + return &ServerMetadataDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. 
+func (c *ServerMetadataClient) DeleteOne(sm *ServerMetadata) *ServerMetadataDeleteOne { + return c.DeleteOneID(sm.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *ServerMetadataClient) DeleteOneID(id uuid.UUID) *ServerMetadataDeleteOne { + builder := c.Delete().Where(servermetadata.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &ServerMetadataDeleteOne{builder} +} + +// Query returns a query builder for ServerMetadata. +func (c *ServerMetadataClient) Query() *ServerMetadataQuery { + return &ServerMetadataQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeServerMetadata}, + inters: c.Interceptors(), + } +} + +// Get returns a ServerMetadata entity by its id. +func (c *ServerMetadataClient) Get(ctx context.Context, id uuid.UUID) (*ServerMetadata, error) { + return c.Query().Where(servermetadata.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *ServerMetadataClient) GetX(ctx context.Context, id uuid.UUID) *ServerMetadata { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryFollower queries the follower edge of a ServerMetadata. +func (c *ServerMetadataClient) QueryFollower(sm *ServerMetadata) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := sm.ID + step := sqlgraph.NewStep( + sqlgraph.From(servermetadata.Table, servermetadata.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, servermetadata.FollowerTable, servermetadata.FollowerColumn), + ) + fromV = sqlgraph.Neighbors(sm.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryFollowee queries the followee edge of a ServerMetadata. 
+func (c *ServerMetadataClient) QueryFollowee(sm *ServerMetadata) *UserQuery { + query := (&UserClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := sm.ID + step := sqlgraph.NewStep( + sqlgraph.From(servermetadata.Table, servermetadata.FieldID, id), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, servermetadata.FolloweeTable, servermetadata.FolloweeColumn), + ) + fromV = sqlgraph.Neighbors(sm.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *ServerMetadataClient) Hooks() []Hook { + return c.hooks.ServerMetadata +} + +// Interceptors returns the client interceptors. +func (c *ServerMetadataClient) Interceptors() []Interceptor { + return c.inters.ServerMetadata +} + +func (c *ServerMetadataClient) mutate(ctx context.Context, m *ServerMetadataMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&ServerMetadataCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&ServerMetadataUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&ServerMetadataUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&ServerMetadataDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown ServerMetadata mutation op: %q", m.Op()) + } +} + +// UserClient is a client for the User schema. +type UserClient struct { + config +} + +// NewUserClient returns a client for the User from the given config. +func NewUserClient(c config) *UserClient { + return &UserClient{config: c} +} + +// Use adds a list of mutation hooks to the hooks stack. +// A call to `Use(f, g, h)` equals to `user.Hooks(f(g(h())))`. +func (c *UserClient) Use(hooks ...Hook) { + c.hooks.User = append(c.hooks.User, hooks...) 
+} + +// Intercept adds a list of query interceptors to the interceptors stack. +// A call to `Intercept(f, g, h)` equals to `user.Intercept(f(g(h())))`. +func (c *UserClient) Intercept(interceptors ...Interceptor) { + c.inters.User = append(c.inters.User, interceptors...) +} + +// Create returns a builder for creating a User entity. +func (c *UserClient) Create() *UserCreate { + mutation := newUserMutation(c.config, OpCreate) + return &UserCreate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// CreateBulk returns a builder for creating a bulk of User entities. +func (c *UserClient) CreateBulk(builders ...*UserCreate) *UserCreateBulk { + return &UserCreateBulk{config: c.config, builders: builders} +} + +// MapCreateBulk creates a bulk creation builder from the given slice. For each item in the slice, the function creates +// a builder and applies setFunc on it. +func (c *UserClient) MapCreateBulk(slice any, setFunc func(*UserCreate, int)) *UserCreateBulk { + rv := reflect.ValueOf(slice) + if rv.Kind() != reflect.Slice { + return &UserCreateBulk{err: fmt.Errorf("calling to UserClient.MapCreateBulk with wrong type %T, need slice", slice)} + } + builders := make([]*UserCreate, rv.Len()) + for i := 0; i < rv.Len(); i++ { + builders[i] = c.Create() + setFunc(builders[i], i) + } + return &UserCreateBulk{config: c.config, builders: builders} +} + +// Update returns an update builder for User. +func (c *UserClient) Update() *UserUpdate { + mutation := newUserMutation(c.config, OpUpdate) + return &UserUpdate{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOne returns an update builder for the given entity. +func (c *UserClient) UpdateOne(u *User) *UserUpdateOne { + mutation := newUserMutation(c.config, OpUpdateOne, withUser(u)) + return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// UpdateOneID returns an update builder for the given id. 
+func (c *UserClient) UpdateOneID(id uuid.UUID) *UserUpdateOne { + mutation := newUserMutation(c.config, OpUpdateOne, withUserID(id)) + return &UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// Delete returns a delete builder for User. +func (c *UserClient) Delete() *UserDelete { + mutation := newUserMutation(c.config, OpDelete) + return &UserDelete{config: c.config, hooks: c.Hooks(), mutation: mutation} +} + +// DeleteOne returns a builder for deleting the given entity. +func (c *UserClient) DeleteOne(u *User) *UserDeleteOne { + return c.DeleteOneID(u.ID) +} + +// DeleteOneID returns a builder for deleting the given entity by its id. +func (c *UserClient) DeleteOneID(id uuid.UUID) *UserDeleteOne { + builder := c.Delete().Where(user.ID(id)) + builder.mutation.id = &id + builder.mutation.op = OpDeleteOne + return &UserDeleteOne{builder} +} + +// Query returns a query builder for User. +func (c *UserClient) Query() *UserQuery { + return &UserQuery{ + config: c.config, + ctx: &QueryContext{Type: TypeUser}, + inters: c.Interceptors(), + } +} + +// Get returns a User entity by its id. +func (c *UserClient) Get(ctx context.Context, id uuid.UUID) (*User, error) { + return c.Query().Where(user.ID(id)).Only(ctx) +} + +// GetX is like Get, but panics if an error occurs. +func (c *UserClient) GetX(ctx context.Context, id uuid.UUID) *User { + obj, err := c.Get(ctx, id) + if err != nil { + panic(err) + } + return obj +} + +// QueryAvatarImage queries the avatarImage edge of a User. 
+func (c *UserClient) QueryAvatarImage(u *User) *ImageQuery { + query := (&ImageClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := u.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(image.Table, image.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, user.AvatarImageTable, user.AvatarImageColumn), + ) + fromV = sqlgraph.Neighbors(u.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryHeaderImage queries the headerImage edge of a User. +func (c *UserClient) QueryHeaderImage(u *User) *ImageQuery { + query := (&ImageClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := u.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(image.Table, image.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, user.HeaderImageTable, user.HeaderImageColumn), + ) + fromV = sqlgraph.Neighbors(u.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryAuthoredNotes queries the authoredNotes edge of a User. +func (c *UserClient) QueryAuthoredNotes(u *User) *NoteQuery { + query := (&NoteClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := u.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(note.Table, note.FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, user.AuthoredNotesTable, user.AuthoredNotesColumn), + ) + fromV = sqlgraph.Neighbors(u.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// QueryMentionedNotes queries the mentionedNotes edge of a User. 
+func (c *UserClient) QueryMentionedNotes(u *User) *NoteQuery { + query := (&NoteClient{config: c.config}).Query() + query.path = func(context.Context) (fromV *sql.Selector, _ error) { + id := u.ID + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, id), + sqlgraph.To(note.Table, note.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, user.MentionedNotesTable, user.MentionedNotesPrimaryKey...), + ) + fromV = sqlgraph.Neighbors(u.driver.Dialect(), step) + return fromV, nil + } + return query +} + +// Hooks returns the client hooks. +func (c *UserClient) Hooks() []Hook { + return c.hooks.User +} + +// Interceptors returns the client interceptors. +func (c *UserClient) Interceptors() []Interceptor { + return c.inters.User +} + +func (c *UserClient) mutate(ctx context.Context, m *UserMutation) (Value, error) { + switch m.Op() { + case OpCreate: + return (&UserCreate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdate: + return (&UserUpdate{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpUpdateOne: + return (&UserUpdateOne{config: c.config, hooks: c.Hooks(), mutation: m}).Save(ctx) + case OpDelete, OpDeleteOne: + return (&UserDelete{config: c.config, hooks: c.Hooks(), mutation: m}).Exec(ctx) + default: + return nil, fmt.Errorf("ent: unknown User mutation op: %q", m.Op()) + } +} + +// hooks and interceptors per client, for fast access. +type ( + hooks struct { + Attachment, Follow, Image, Note, ServerMetadata, User []ent.Hook + } + inters struct { + Attachment, Follow, Image, Note, ServerMetadata, User []ent.Interceptor + } +) diff --git a/ent/ent.go b/ent/ent.go new file mode 100644 index 0000000..50b840a --- /dev/null +++ b/ent/ent.go @@ -0,0 +1,618 @@ +// Code generated by ent, DO NOT EDIT. 

package ent

import (
	"context"
	"errors"
	"fmt"
	"reflect"
	"sync"

	"entgo.io/ent"
	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"github.com/lysand-org/versia-go/ent/attachment"
	"github.com/lysand-org/versia-go/ent/follow"
	"github.com/lysand-org/versia-go/ent/image"
	"github.com/lysand-org/versia-go/ent/note"
	"github.com/lysand-org/versia-go/ent/servermetadata"
	"github.com/lysand-org/versia-go/ent/user"
)

// ent aliases to avoid import conflicts in user's code.
type (
	Op            = ent.Op
	Hook          = ent.Hook
	Value         = ent.Value
	Query         = ent.Query
	QueryContext  = ent.QueryContext
	Querier       = ent.Querier
	QuerierFunc   = ent.QuerierFunc
	Interceptor   = ent.Interceptor
	InterceptFunc = ent.InterceptFunc
	Traverser     = ent.Traverser
	TraverseFunc  = ent.TraverseFunc
	Policy        = ent.Policy
	Mutator       = ent.Mutator
	Mutation      = ent.Mutation
	MutateFunc    = ent.MutateFunc
)

// clientCtxKey is an unexported struct{} context key, guaranteeing no
// collision with context keys defined by other packages.
type clientCtxKey struct{}

// FromContext returns a Client stored inside a context, or nil if there isn't one.
func FromContext(ctx context.Context) *Client {
	// Two-value type assertion: yields nil (never panics) when the key is
	// absent or holds a different type.
	c, _ := ctx.Value(clientCtxKey{}).(*Client)
	return c
}

// NewContext returns a new context with the given Client attached.
func NewContext(parent context.Context, c *Client) context.Context {
	return context.WithValue(parent, clientCtxKey{}, c)
}

// txCtxKey is the context key under which a Tx is stored.
type txCtxKey struct{}

// TxFromContext returns a Tx stored inside a context, or nil if there isn't one.
func TxFromContext(ctx context.Context) *Tx {
	tx, _ := ctx.Value(txCtxKey{}).(*Tx)
	return tx
}

// NewTxContext returns a new context with the given Tx attached.
func NewTxContext(parent context.Context, tx *Tx) context.Context {
	return context.WithValue(parent, txCtxKey{}, tx)
}

// OrderFunc applies an ordering on the sql selector.
// Deprecated: Use Asc/Desc functions or the package builders instead.
type OrderFunc func(*sql.Selector)

var (
	// initCheck guards the one-time construction of columnCheck below.
	initCheck   sync.Once
	columnCheck sql.ColumnCheck
)

// checkColumn checks if the column exists in the given table.
func checkColumn(table, column string) error {
	// Lazily build the table -> column-validator map once; every schema
	// table of this package is registered here.
	initCheck.Do(func() {
		columnCheck = sql.NewColumnCheck(map[string]func(string) bool{
			attachment.Table:     attachment.ValidColumn,
			follow.Table:         follow.ValidColumn,
			image.Table:          image.ValidColumn,
			note.Table:           note.ValidColumn,
			servermetadata.Table: servermetadata.ValidColumn,
			user.Table:           user.ValidColumn,
		})
	})
	return columnCheck(table, column)
}

// Asc applies the given fields in ASC order.
func Asc(fields ...string) func(*sql.Selector) {
	return func(s *sql.Selector) {
		for _, f := range fields {
			// Unknown columns are recorded as a selector error; the ORDER BY
			// clause is still appended regardless.
			if err := checkColumn(s.TableName(), f); err != nil {
				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
			}
			s.OrderBy(sql.Asc(s.C(f)))
		}
	}
}

// Desc applies the given fields in DESC order.
func Desc(fields ...string) func(*sql.Selector) {
	return func(s *sql.Selector) {
		for _, f := range fields {
			if err := checkColumn(s.TableName(), f); err != nil {
				s.AddError(&ValidationError{Name: f, err: fmt.Errorf("ent: %w", err)})
			}
			s.OrderBy(sql.Desc(s.C(f)))
		}
	}
}

// AggregateFunc applies an aggregation step on the group-by traversal/selector.
type AggregateFunc func(*sql.Selector) string

// As is a pseudo aggregation function for renaming other functions with custom names. For example:
//
//	GroupBy(field1, field2).
//	Aggregate(ent.As(ent.Sum(field1), "sum_field1"), (ent.As(ent.Sum(field2), "sum_field2")).
//	Scan(ctx, &v)
func As(fn AggregateFunc, end string) AggregateFunc {
	return func(s *sql.Selector) string {
		return sql.As(fn(s), end)
	}
}

// Count applies the "count" aggregation function on each group.
func Count() AggregateFunc {
	// COUNT(*) needs no column validation, unlike the field aggregates below.
	return func(s *sql.Selector) string {
		return sql.Count("*")
	}
}

// Max applies the "max" aggregation function on the given field of each group.
func Max(field string) AggregateFunc {
	return func(s *sql.Selector) string {
		// On an invalid column: record the error on the selector and emit no
		// aggregation expression ("").
		if err := checkColumn(s.TableName(), field); err != nil {
			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
			return ""
		}
		return sql.Max(s.C(field))
	}
}

// Mean applies the "mean" aggregation function on the given field of each group.
func Mean(field string) AggregateFunc {
	return func(s *sql.Selector) string {
		if err := checkColumn(s.TableName(), field); err != nil {
			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
			return ""
		}
		// SQL AVG implements the "mean" aggregate.
		return sql.Avg(s.C(field))
	}
}

// Min applies the "min" aggregation function on the given field of each group.
func Min(field string) AggregateFunc {
	return func(s *sql.Selector) string {
		if err := checkColumn(s.TableName(), field); err != nil {
			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
			return ""
		}
		return sql.Min(s.C(field))
	}
}

// Sum applies the "sum" aggregation function on the given field of each group.
func Sum(field string) AggregateFunc {
	return func(s *sql.Selector) string {
		if err := checkColumn(s.TableName(), field); err != nil {
			s.AddError(&ValidationError{Name: field, err: fmt.Errorf("ent: %w", err)})
			return ""
		}
		return sql.Sum(s.C(field))
	}
}

// ValidationError returns when validating a field or edge fails.
type ValidationError struct {
	Name string // Field or edge name.
	err  error  // underlying cause, exposed via Unwrap
}

// Error implements the error interface.
func (e *ValidationError) Error() string {
	return e.err.Error()
}

// Unwrap implements the errors.Wrapper interface.
func (e *ValidationError) Unwrap() error {
	return e.err
}

// IsValidationError returns a boolean indicating whether the error is a validation error.
+func IsValidationError(err error) bool { + if err == nil { + return false + } + var e *ValidationError + return errors.As(err, &e) +} + +// NotFoundError returns when trying to fetch a specific entity and it was not found in the database. +type NotFoundError struct { + label string +} + +// Error implements the error interface. +func (e *NotFoundError) Error() string { + return "ent: " + e.label + " not found" +} + +// IsNotFound returns a boolean indicating whether the error is a not found error. +func IsNotFound(err error) bool { + if err == nil { + return false + } + var e *NotFoundError + return errors.As(err, &e) +} + +// MaskNotFound masks not found error. +func MaskNotFound(err error) error { + if IsNotFound(err) { + return nil + } + return err +} + +// NotSingularError returns when trying to fetch a singular entity and more then one was found in the database. +type NotSingularError struct { + label string +} + +// Error implements the error interface. +func (e *NotSingularError) Error() string { + return "ent: " + e.label + " not singular" +} + +// IsNotSingular returns a boolean indicating whether the error is a not singular error. +func IsNotSingular(err error) bool { + if err == nil { + return false + } + var e *NotSingularError + return errors.As(err, &e) +} + +// NotLoadedError returns when trying to get a node that was not loaded by the query. +type NotLoadedError struct { + edge string +} + +// Error implements the error interface. +func (e *NotLoadedError) Error() string { + return "ent: " + e.edge + " edge was not loaded" +} + +// IsNotLoaded returns a boolean indicating whether the error is a not loaded error. +func IsNotLoaded(err error) bool { + if err == nil { + return false + } + var e *NotLoadedError + return errors.As(err, &e) +} + +// ConstraintError returns when trying to create/update one or more entities and +// one or more of their constraints failed. For example, violation of edge or +// field uniqueness. 
+type ConstraintError struct { + msg string + wrap error +} + +// Error implements the error interface. +func (e ConstraintError) Error() string { + return "ent: constraint failed: " + e.msg +} + +// Unwrap implements the errors.Wrapper interface. +func (e *ConstraintError) Unwrap() error { + return e.wrap +} + +// IsConstraintError returns a boolean indicating whether the error is a constraint failure. +func IsConstraintError(err error) bool { + if err == nil { + return false + } + var e *ConstraintError + return errors.As(err, &e) +} + +// selector embedded by the different Select/GroupBy builders. +type selector struct { + label string + flds *[]string + fns []AggregateFunc + scan func(context.Context, any) error +} + +// ScanX is like Scan, but panics if an error occurs. +func (s *selector) ScanX(ctx context.Context, v any) { + if err := s.scan(ctx, v); err != nil { + panic(err) + } +} + +// Strings returns list of strings from a selector. It is only allowed when selecting one field. +func (s *selector) Strings(ctx context.Context) ([]string, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Strings is not achievable when selecting more than 1 field") + } + var v []string + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// StringsX is like Strings, but panics if an error occurs. +func (s *selector) StringsX(ctx context.Context) []string { + v, err := s.Strings(ctx) + if err != nil { + panic(err) + } + return v +} + +// String returns a single string from a selector. It is only allowed when selecting one field. +func (s *selector) String(ctx context.Context) (_ string, err error) { + var v []string + if v, err = s.Strings(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Strings returned %d results when one was expected", len(v)) + } + return +} + +// StringX is like String, but panics if an error occurs. 
+func (s *selector) StringX(ctx context.Context) string { + v, err := s.String(ctx) + if err != nil { + panic(err) + } + return v +} + +// Ints returns list of ints from a selector. It is only allowed when selecting one field. +func (s *selector) Ints(ctx context.Context) ([]int, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Ints is not achievable when selecting more than 1 field") + } + var v []int + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// IntsX is like Ints, but panics if an error occurs. +func (s *selector) IntsX(ctx context.Context) []int { + v, err := s.Ints(ctx) + if err != nil { + panic(err) + } + return v +} + +// Int returns a single int from a selector. It is only allowed when selecting one field. +func (s *selector) Int(ctx context.Context) (_ int, err error) { + var v []int + if v, err = s.Ints(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Ints returned %d results when one was expected", len(v)) + } + return +} + +// IntX is like Int, but panics if an error occurs. +func (s *selector) IntX(ctx context.Context) int { + v, err := s.Int(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64s returns list of float64s from a selector. It is only allowed when selecting one field. +func (s *selector) Float64s(ctx context.Context) ([]float64, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Float64s is not achievable when selecting more than 1 field") + } + var v []float64 + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// Float64sX is like Float64s, but panics if an error occurs. +func (s *selector) Float64sX(ctx context.Context) []float64 { + v, err := s.Float64s(ctx) + if err != nil { + panic(err) + } + return v +} + +// Float64 returns a single float64 from a selector. It is only allowed when selecting one field. 
+func (s *selector) Float64(ctx context.Context) (_ float64, err error) { + var v []float64 + if v, err = s.Float64s(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Float64s returned %d results when one was expected", len(v)) + } + return +} + +// Float64X is like Float64, but panics if an error occurs. +func (s *selector) Float64X(ctx context.Context) float64 { + v, err := s.Float64(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bools returns list of bools from a selector. It is only allowed when selecting one field. +func (s *selector) Bools(ctx context.Context) ([]bool, error) { + if len(*s.flds) > 1 { + return nil, errors.New("ent: Bools is not achievable when selecting more than 1 field") + } + var v []bool + if err := s.scan(ctx, &v); err != nil { + return nil, err + } + return v, nil +} + +// BoolsX is like Bools, but panics if an error occurs. +func (s *selector) BoolsX(ctx context.Context) []bool { + v, err := s.Bools(ctx) + if err != nil { + panic(err) + } + return v +} + +// Bool returns a single bool from a selector. It is only allowed when selecting one field. +func (s *selector) Bool(ctx context.Context) (_ bool, err error) { + var v []bool + if v, err = s.Bools(ctx); err != nil { + return + } + switch len(v) { + case 1: + return v[0], nil + case 0: + err = &NotFoundError{s.label} + default: + err = fmt.Errorf("ent: Bools returned %d results when one was expected", len(v)) + } + return +} + +// BoolX is like Bool, but panics if an error occurs. +func (s *selector) BoolX(ctx context.Context) bool { + v, err := s.Bool(ctx) + if err != nil { + panic(err) + } + return v +} + +// withHooks invokes the builder operation with the given hooks, if any. 
func withHooks[V Value, M any, PM interface {
	*M
	Mutation
}](ctx context.Context, exec func(context.Context) (V, error), mutation PM, hooks []Hook) (value V, err error) {
	// Fast path: no hooks registered, run the builder operation directly.
	if len(hooks) == 0 {
		return exec(ctx)
	}
	// The innermost mutator copies the (possibly hook-modified) mutation back
	// into the builder and then executes the operation.
	var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) {
		mutationT, ok := any(m).(PM)
		if !ok {
			return nil, fmt.Errorf("unexpected mutation type %T", m)
		}
		// Set the mutation to the builder.
		*mutation = *mutationT
		return exec(ctx)
	})
	// Wrap in reverse order so hooks[0] ends up as the outermost layer.
	for i := len(hooks) - 1; i >= 0; i-- {
		if hooks[i] == nil {
			return value, fmt.Errorf("ent: uninitialized hook (forgotten import ent/runtime?)")
		}
		mut = hooks[i](mut)
	}
	v, err := mut.Mutate(ctx, mutation)
	if err != nil {
		return value, err
	}
	// A hook may return an arbitrary Value; ensure it is the node type the
	// builder promised before handing it back.
	nv, ok := v.(V)
	if !ok {
		return value, fmt.Errorf("unexpected node type %T returned from %T", v, mutation)
	}
	return nv, nil
}

// setContextOp returns a new context with the given QueryContext attached (including its op) in case it does not exist.
+func setContextOp(ctx context.Context, qc *QueryContext, op string) context.Context { + if ent.QueryFromContext(ctx) == nil { + qc.Op = op + ctx = ent.NewQueryContext(ctx, qc) + } + return ctx +} + +func querierAll[V Value, Q interface { + sqlAll(context.Context, ...queryHook) (V, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlAll(ctx) + }) +} + +func querierCount[Q interface { + sqlCount(context.Context) (int, error) +}]() Querier { + return QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + return query.sqlCount(ctx) + }) +} + +func withInterceptors[V Value](ctx context.Context, q Query, qr Querier, inters []Interceptor) (v V, err error) { + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + rv, err := qr.Query(ctx, q) + if err != nil { + return v, err + } + vt, ok := rv.(V) + if !ok { + return v, fmt.Errorf("unexpected type %T returned from %T. 
expected type: %T", vt, q, v) + } + return vt, nil +} + +func scanWithInterceptors[Q1 ent.Query, Q2 interface { + sqlScan(context.Context, Q1, any) error +}](ctx context.Context, rootQuery Q1, selectOrGroup Q2, inters []Interceptor, v any) error { + rv := reflect.ValueOf(v) + var qr Querier = QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + query, ok := q.(Q1) + if !ok { + return nil, fmt.Errorf("unexpected query type %T", q) + } + if err := selectOrGroup.sqlScan(ctx, query, v); err != nil { + return nil, err + } + if k := rv.Kind(); k == reflect.Pointer && rv.Elem().CanInterface() { + return rv.Elem().Interface(), nil + } + return v, nil + }) + for i := len(inters) - 1; i >= 0; i-- { + qr = inters[i].Intercept(qr) + } + vv, err := qr.Query(ctx, rootQuery) + if err != nil { + return err + } + switch rv2 := reflect.ValueOf(vv); { + case rv.IsNil(), rv2.IsNil(), rv.Kind() != reflect.Pointer: + case rv.Type() == rv2.Type(): + rv.Elem().Set(rv2.Elem()) + case rv.Elem().Type() == rv2.Type(): + rv.Elem().Set(rv2) + } + return nil +} + +// queryHook describes an internal hook for the different sqlAll methods. +type queryHook func(context.Context, *sqlgraph.QuerySpec) diff --git a/ent/enttest/enttest.go b/ent/enttest/enttest.go new file mode 100644 index 0000000..e863f07 --- /dev/null +++ b/ent/enttest/enttest.go @@ -0,0 +1,84 @@ +// Code generated by ent, DO NOT EDIT. + +package enttest + +import ( + "context" + + "github.com/lysand-org/versia-go/ent" + // required by schema hooks. + _ "github.com/lysand-org/versia-go/ent/runtime" + + "entgo.io/ent/dialect/sql/schema" + "github.com/lysand-org/versia-go/ent/migrate" +) + +type ( + // TestingT is the interface that is shared between + // testing.T and testing.B and used by enttest. + TestingT interface { + FailNow() + Error(...any) + } + + // Option configures client creation. 
+ Option func(*options) + + options struct { + opts []ent.Option + migrateOpts []schema.MigrateOption + } +) + +// WithOptions forwards options to client creation. +func WithOptions(opts ...ent.Option) Option { + return func(o *options) { + o.opts = append(o.opts, opts...) + } +} + +// WithMigrateOptions forwards options to auto migration. +func WithMigrateOptions(opts ...schema.MigrateOption) Option { + return func(o *options) { + o.migrateOpts = append(o.migrateOpts, opts...) + } +} + +func newOptions(opts []Option) *options { + o := &options{} + for _, opt := range opts { + opt(o) + } + return o +} + +// Open calls ent.Open and auto-run migration. +func Open(t TestingT, driverName, dataSourceName string, opts ...Option) *ent.Client { + o := newOptions(opts) + c, err := ent.Open(driverName, dataSourceName, o.opts...) + if err != nil { + t.Error(err) + t.FailNow() + } + migrateSchema(t, c, o) + return c +} + +// NewClient calls ent.NewClient and auto-run migration. +func NewClient(t TestingT, opts ...Option) *ent.Client { + o := newOptions(opts) + c := ent.NewClient(o.opts...) + migrateSchema(t, c, o) + return c +} +func migrateSchema(t TestingT, c *ent.Client, o *options) { + tables, err := schema.CopyTables(migrate.Tables) + if err != nil { + t.Error(err) + t.FailNow() + } + if err := migrate.Create(context.Background(), c.Schema, tables, o.migrateOpts...); err != nil { + t.Error(err) + t.FailNow() + } +} diff --git a/ent/follow.go b/ent/follow.go new file mode 100644 index 0000000..cf007f2 --- /dev/null +++ b/ent/follow.go @@ -0,0 +1,237 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/follow" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// Follow is the model entity for the Follow schema. 
type Follow struct {
	config `json:"-"`
	// ID of the ent.
	ID uuid.UUID `json:"id,omitempty"`
	// IsRemote holds the value of the "isRemote" field.
	IsRemote bool `json:"isRemote,omitempty"`
	// URI holds the value of the "uri" field.
	URI string `json:"uri,omitempty"`
	// Extensions holds the value of the "extensions" field.
	Extensions lysand.Extensions `json:"extensions,omitempty"`
	// CreatedAt holds the value of the "created_at" field.
	CreatedAt time.Time `json:"created_at,omitempty"`
	// UpdatedAt holds the value of the "updated_at" field.
	UpdatedAt time.Time `json:"updated_at,omitempty"`
	// Status holds the value of the "status" field.
	Status follow.Status `json:"status,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the FollowQuery when eager-loading is set.
	Edges FollowEdges `json:"edges"`
	// follow_follower and follow_followee hold the raw foreign-key values
	// scanned from the row; nil when the FK column was NULL.
	follow_follower *uuid.UUID
	follow_followee *uuid.UUID
	// selectValues stores extra values selected via modifiers/order terms.
	selectValues sql.SelectValues
}

// FollowEdges holds the relations/edges for other nodes in the graph.
type FollowEdges struct {
	// Follower holds the value of the follower edge.
	Follower *User `json:"follower,omitempty"`
	// Followee holds the value of the followee edge.
	Followee *User `json:"followee,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	// Index 0 is the follower edge, index 1 the followee edge.
	loadedTypes [2]bool
}

// FollowerOrErr returns the Follower value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e FollowEdges) FollowerOrErr() (*User, error) {
	if e.Follower != nil {
		return e.Follower, nil
	} else if e.loadedTypes[0] {
		// The edge was requested during eager-loading but no row matched.
		return nil, &NotFoundError{label: user.Label}
	}
	return nil, &NotLoadedError{edge: "follower"}
}

// FolloweeOrErr returns the Followee value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e FollowEdges) FolloweeOrErr() (*User, error) {
	if e.Followee != nil {
		return e.Followee, nil
	} else if e.loadedTypes[1] {
		// Edge requested during eager-loading but no row matched.
		return nil, &NotFoundError{label: user.Label}
	}
	return nil, &NotLoadedError{edge: "followee"}
}

// scanValues returns the types for scanning values from sql.Rows.
func (*Follow) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		// Pick a scan destination matching each column's SQL type; unknown
		// columns fall through to sql.UnknownType and are kept as-is.
		switch columns[i] {
		case follow.FieldExtensions:
			// JSON column, scanned as raw bytes and unmarshalled later.
			values[i] = new([]byte)
		case follow.FieldIsRemote:
			values[i] = new(sql.NullBool)
		case follow.FieldURI, follow.FieldStatus:
			values[i] = new(sql.NullString)
		case follow.FieldCreatedAt, follow.FieldUpdatedAt:
			values[i] = new(sql.NullTime)
		case follow.FieldID:
			values[i] = new(uuid.UUID)
		case follow.ForeignKeys[0]: // follow_follower
			// Nullable FK: NullScanner distinguishes NULL from a zero UUID.
			values[i] = &sql.NullScanner{S: new(uuid.UUID)}
		case follow.ForeignKeys[1]: // follow_followee
			values[i] = &sql.NullScanner{S: new(uuid.UUID)}
		default:
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}

// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the Follow fields.
func (f *Follow) assignValues(columns []string, values []any) error {
	// values must cover every requested column; extra values are tolerated.
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case follow.FieldID:
			if value, ok := values[i].(*uuid.UUID); !ok {
				return fmt.Errorf("unexpected type %T for field id", values[i])
			} else if value != nil {
				f.ID = *value
			}
		case follow.FieldIsRemote:
			if value, ok := values[i].(*sql.NullBool); !ok {
				return fmt.Errorf("unexpected type %T for field isRemote", values[i])
			} else if value.Valid {
				f.IsRemote = value.Bool
			}
		case follow.FieldURI:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field uri", values[i])
			} else if value.Valid {
				f.URI = value.String
			}
		case follow.FieldExtensions:
			// JSON column: only unmarshal a non-empty payload.
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field extensions", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &f.Extensions); err != nil {
					return fmt.Errorf("unmarshal field extensions: %w", err)
				}
			}
		case follow.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				f.CreatedAt = value.Time
			}
		case follow.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				f.UpdatedAt = value.Time
			}
		case follow.FieldStatus:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field status", values[i])
			} else if value.Valid {
				f.Status = follow.Status(value.String)
			}
		case follow.ForeignKeys[0]:
			// Nullable FK: allocate only when the column is non-NULL.
			if value, ok := values[i].(*sql.NullScanner); !ok {
				return fmt.Errorf("unexpected type %T for field follow_follower", values[i])
			} else if value.Valid {
				f.follow_follower = new(uuid.UUID)
				*f.follow_follower = *value.S.(*uuid.UUID)
			}
		case follow.ForeignKeys[1]:
			if value, ok := values[i].(*sql.NullScanner); !ok {
				return fmt.Errorf("unexpected type %T for field follow_followee", values[i])
			} else if value.Valid {
				f.follow_followee = new(uuid.UUID)
				*f.follow_followee = *value.S.(*uuid.UUID)
			}
		default:
			// Unknown columns (modifiers, custom order terms) are retained
			// and later exposed through Value().
			f.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}

// Value returns the ent.Value that was dynamically selected and assigned to the Follow.
// This includes values selected through modifiers, order, etc.
func (f *Follow) Value(name string) (ent.Value, error) {
	return f.selectValues.Get(name)
}

// QueryFollower queries the "follower" edge of the Follow entity.
func (f *Follow) QueryFollower() *UserQuery {
	return NewFollowClient(f.config).QueryFollower(f)
}

// QueryFollowee queries the "followee" edge of the Follow entity.
func (f *Follow) QueryFollowee() *UserQuery {
	return NewFollowClient(f.config).QueryFollowee(f)
}

// Update returns a builder for updating this Follow.
// Note that you need to call Follow.Unwrap() before calling this method if this Follow
// was returned from a transaction, and the transaction was committed or rolled back.
func (f *Follow) Update() *FollowUpdateOne {
	return NewFollowClient(f.config).UpdateOne(f)
}

// Unwrap unwraps the Follow entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (f *Follow) Unwrap() *Follow {
	_tx, ok := f.config.driver.(*txDriver)
	if !ok {
		// Panics when the entity did not originate from a transaction.
		panic("ent: Follow is not a transactional entity")
	}
	f.config.driver = _tx.drv
	return f
}

// String implements the fmt.Stringer.
+func (f *Follow) String() string { + var builder strings.Builder + builder.WriteString("Follow(") + builder.WriteString(fmt.Sprintf("id=%v, ", f.ID)) + builder.WriteString("isRemote=") + builder.WriteString(fmt.Sprintf("%v", f.IsRemote)) + builder.WriteString(", ") + builder.WriteString("uri=") + builder.WriteString(f.URI) + builder.WriteString(", ") + builder.WriteString("extensions=") + builder.WriteString(fmt.Sprintf("%v", f.Extensions)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(f.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(f.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("status=") + builder.WriteString(fmt.Sprintf("%v", f.Status)) + builder.WriteByte(')') + return builder.String() +} + +// Follows is a parsable slice of Follow. +type Follows []*Follow diff --git a/ent/follow/follow.go b/ent/follow/follow.go new file mode 100644 index 0000000..2599034 --- /dev/null +++ b/ent/follow/follow.go @@ -0,0 +1,187 @@ +// Code generated by ent, DO NOT EDIT. + +package follow + +import ( + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +const ( + // Label holds the string label denoting the follow type in the database. + Label = "follow" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldIsRemote holds the string denoting the isremote field in the database. + FieldIsRemote = "is_remote" + // FieldURI holds the string denoting the uri field in the database. + FieldURI = "uri" + // FieldExtensions holds the string denoting the extensions field in the database. + FieldExtensions = "extensions" + // FieldCreatedAt holds the string denoting the created_at field in the database. 
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldStatus holds the string denoting the status field in the database.
	FieldStatus = "status"
	// EdgeFollower holds the string denoting the follower edge name in mutations.
	EdgeFollower = "follower"
	// EdgeFollowee holds the string denoting the followee edge name in mutations.
	EdgeFollowee = "followee"
	// Table holds the table name of the follow in the database.
	Table = "follows"
	// FollowerTable is the table that holds the follower relation/edge.
	// The FK column lives on the follows table itself.
	FollowerTable = "follows"
	// FollowerInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	FollowerInverseTable = "users"
	// FollowerColumn is the table column denoting the follower relation/edge.
	FollowerColumn = "follow_follower"
	// FolloweeTable is the table that holds the followee relation/edge.
	FolloweeTable = "follows"
	// FolloweeInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	FolloweeInverseTable = "users"
	// FolloweeColumn is the table column denoting the followee relation/edge.
	FolloweeColumn = "follow_followee"
)

// Columns holds all SQL columns for follow fields.
var Columns = []string{
	FieldID,
	FieldIsRemote,
	FieldURI,
	FieldExtensions,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldStatus,
}

// ForeignKeys holds the SQL foreign-keys that are owned by the "follows"
// table and are not defined as standalone fields in the schema.
// Order matters: index 0 is follow_follower, index 1 is follow_followee
// (scanValues/assignValues in ent/follow.go index into this slice).
var ForeignKeys = []string{
	"follow_follower",
	"follow_followee",
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	// A column is valid if it is either a schema field or one of the
	// FK columns owned by this table.
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	for i := range ForeignKeys {
		if column == ForeignKeys[i] {
			return true
		}
	}
	return false
}

var (
	// URIValidator is a validator for the "uri" field. It is called by the builders before save.
	URIValidator func(string) error
	// DefaultExtensions holds the default value on creation for the "extensions" field.
	DefaultExtensions lysand.Extensions
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// DefaultID holds the default value on creation for the "id" field.
	DefaultID func() uuid.UUID
)

// Status defines the type for the "status" enum field.
type Status string

// DefaultStatus is the default value of the Status enum (StatusPending).
const DefaultStatus = StatusPending

// Status values.
const (
	StatusPending  Status = "pending"
	StatusAccepted Status = "accepted"
)

// String returns the enum value as a plain string.
func (s Status) String() string {
	return string(s)
}

// StatusValidator is a validator for the "status" field enum values. It is called by the builders before save.
func StatusValidator(s Status) error {
	switch s {
	case StatusPending, StatusAccepted:
		return nil
	default:
		return fmt.Errorf("follow: invalid enum value for status field: %q", s)
	}
}

// OrderOption defines the ordering options for the Follow queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByIsRemote orders the results by the isRemote field.
+func ByIsRemote(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIsRemote, opts...).ToFunc() +} + +// ByURI orders the results by the uri field. +func ByURI(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldURI, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByStatus orders the results by the status field. +func ByStatus(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldStatus, opts...).ToFunc() +} + +// ByFollowerField orders the results by follower field. +func ByFollowerField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newFollowerStep(), sql.OrderByField(field, opts...)) + } +} + +// ByFolloweeField orders the results by followee field. 
+func ByFolloweeField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newFolloweeStep(), sql.OrderByField(field, opts...)) + } +} +func newFollowerStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(FollowerInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, FollowerTable, FollowerColumn), + ) +} +func newFolloweeStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(FolloweeInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, FolloweeTable, FolloweeColumn), + ) +} diff --git a/ent/follow/where.go b/ent/follow/where.go new file mode 100644 index 0000000..197a07b --- /dev/null +++ b/ent/follow/where.go @@ -0,0 +1,313 @@ +// Code generated by ent, DO NOT EDIT. + +package follow + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.Follow { + return predicate.Follow(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id uuid.UUID) predicate.Follow { + return predicate.Follow(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id uuid.UUID) predicate.Follow { + return predicate.Follow(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.Follow { + return predicate.Follow(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...uuid.UUID) predicate.Follow { + return predicate.Follow(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. 
+func IDGT(id uuid.UUID) predicate.Follow { + return predicate.Follow(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.Follow { + return predicate.Follow(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.Follow { + return predicate.Follow(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id uuid.UUID) predicate.Follow { + return predicate.Follow(sql.FieldLTE(FieldID, id)) +} + +// IsRemote applies equality check predicate on the "isRemote" field. It's identical to IsRemoteEQ. +func IsRemote(v bool) predicate.Follow { + return predicate.Follow(sql.FieldEQ(FieldIsRemote, v)) +} + +// URI applies equality check predicate on the "uri" field. It's identical to URIEQ. +func URI(v string) predicate.Follow { + return predicate.Follow(sql.FieldEQ(FieldURI, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Follow { + return predicate.Follow(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Follow { + return predicate.Follow(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// IsRemoteEQ applies the EQ predicate on the "isRemote" field. +func IsRemoteEQ(v bool) predicate.Follow { + return predicate.Follow(sql.FieldEQ(FieldIsRemote, v)) +} + +// IsRemoteNEQ applies the NEQ predicate on the "isRemote" field. +func IsRemoteNEQ(v bool) predicate.Follow { + return predicate.Follow(sql.FieldNEQ(FieldIsRemote, v)) +} + +// URIEQ applies the EQ predicate on the "uri" field. +func URIEQ(v string) predicate.Follow { + return predicate.Follow(sql.FieldEQ(FieldURI, v)) +} + +// URINEQ applies the NEQ predicate on the "uri" field. 
+func URINEQ(v string) predicate.Follow { + return predicate.Follow(sql.FieldNEQ(FieldURI, v)) +} + +// URIIn applies the In predicate on the "uri" field. +func URIIn(vs ...string) predicate.Follow { + return predicate.Follow(sql.FieldIn(FieldURI, vs...)) +} + +// URINotIn applies the NotIn predicate on the "uri" field. +func URINotIn(vs ...string) predicate.Follow { + return predicate.Follow(sql.FieldNotIn(FieldURI, vs...)) +} + +// URIGT applies the GT predicate on the "uri" field. +func URIGT(v string) predicate.Follow { + return predicate.Follow(sql.FieldGT(FieldURI, v)) +} + +// URIGTE applies the GTE predicate on the "uri" field. +func URIGTE(v string) predicate.Follow { + return predicate.Follow(sql.FieldGTE(FieldURI, v)) +} + +// URILT applies the LT predicate on the "uri" field. +func URILT(v string) predicate.Follow { + return predicate.Follow(sql.FieldLT(FieldURI, v)) +} + +// URILTE applies the LTE predicate on the "uri" field. +func URILTE(v string) predicate.Follow { + return predicate.Follow(sql.FieldLTE(FieldURI, v)) +} + +// URIContains applies the Contains predicate on the "uri" field. +func URIContains(v string) predicate.Follow { + return predicate.Follow(sql.FieldContains(FieldURI, v)) +} + +// URIHasPrefix applies the HasPrefix predicate on the "uri" field. +func URIHasPrefix(v string) predicate.Follow { + return predicate.Follow(sql.FieldHasPrefix(FieldURI, v)) +} + +// URIHasSuffix applies the HasSuffix predicate on the "uri" field. +func URIHasSuffix(v string) predicate.Follow { + return predicate.Follow(sql.FieldHasSuffix(FieldURI, v)) +} + +// URIEqualFold applies the EqualFold predicate on the "uri" field. +func URIEqualFold(v string) predicate.Follow { + return predicate.Follow(sql.FieldEqualFold(FieldURI, v)) +} + +// URIContainsFold applies the ContainsFold predicate on the "uri" field. 
+func URIContainsFold(v string) predicate.Follow { + return predicate.Follow(sql.FieldContainsFold(FieldURI, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Follow { + return predicate.Follow(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Follow { + return predicate.Follow(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Follow { + return predicate.Follow(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Follow { + return predicate.Follow(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Follow { + return predicate.Follow(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.Follow { + return predicate.Follow(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Follow { + return predicate.Follow(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Follow { + return predicate.Follow(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Follow { + return predicate.Follow(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. 
+func UpdatedAtNEQ(v time.Time) predicate.Follow { + return predicate.Follow(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Follow { + return predicate.Follow(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Follow { + return predicate.Follow(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Follow { + return predicate.Follow(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Follow { + return predicate.Follow(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Follow { + return predicate.Follow(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Follow { + return predicate.Follow(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// StatusEQ applies the EQ predicate on the "status" field. +func StatusEQ(v Status) predicate.Follow { + return predicate.Follow(sql.FieldEQ(FieldStatus, v)) +} + +// StatusNEQ applies the NEQ predicate on the "status" field. +func StatusNEQ(v Status) predicate.Follow { + return predicate.Follow(sql.FieldNEQ(FieldStatus, v)) +} + +// StatusIn applies the In predicate on the "status" field. +func StatusIn(vs ...Status) predicate.Follow { + return predicate.Follow(sql.FieldIn(FieldStatus, vs...)) +} + +// StatusNotIn applies the NotIn predicate on the "status" field. +func StatusNotIn(vs ...Status) predicate.Follow { + return predicate.Follow(sql.FieldNotIn(FieldStatus, vs...)) +} + +// HasFollower applies the HasEdge predicate on the "follower" edge. 
+func HasFollower() predicate.Follow { + return predicate.Follow(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, FollowerTable, FollowerColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasFollowerWith applies the HasEdge predicate on the "follower" edge with a given conditions (other predicates). +func HasFollowerWith(preds ...predicate.User) predicate.Follow { + return predicate.Follow(func(s *sql.Selector) { + step := newFollowerStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasFollowee applies the HasEdge predicate on the "followee" edge. +func HasFollowee() predicate.Follow { + return predicate.Follow(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, FolloweeTable, FolloweeColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasFolloweeWith applies the HasEdge predicate on the "followee" edge with a given conditions (other predicates). +func HasFolloweeWith(preds ...predicate.User) predicate.Follow { + return predicate.Follow(func(s *sql.Selector) { + step := newFolloweeStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Follow) predicate.Follow { + return predicate.Follow(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Follow) predicate.Follow { + return predicate.Follow(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.Follow) predicate.Follow { + return predicate.Follow(sql.NotPredicates(p)) +} diff --git a/ent/follow_create.go b/ent/follow_create.go new file mode 100644 index 0000000..83f123a --- /dev/null +++ b/ent/follow_create.go @@ -0,0 +1,854 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/follow" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// FollowCreate is the builder for creating a Follow entity. +type FollowCreate struct { + config + mutation *FollowMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetIsRemote sets the "isRemote" field. +func (fc *FollowCreate) SetIsRemote(b bool) *FollowCreate { + fc.mutation.SetIsRemote(b) + return fc +} + +// SetURI sets the "uri" field. +func (fc *FollowCreate) SetURI(s string) *FollowCreate { + fc.mutation.SetURI(s) + return fc +} + +// SetExtensions sets the "extensions" field. +func (fc *FollowCreate) SetExtensions(l lysand.Extensions) *FollowCreate { + fc.mutation.SetExtensions(l) + return fc +} + +// SetCreatedAt sets the "created_at" field. +func (fc *FollowCreate) SetCreatedAt(t time.Time) *FollowCreate { + fc.mutation.SetCreatedAt(t) + return fc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (fc *FollowCreate) SetNillableCreatedAt(t *time.Time) *FollowCreate { + if t != nil { + fc.SetCreatedAt(*t) + } + return fc +} + +// SetUpdatedAt sets the "updated_at" field. +func (fc *FollowCreate) SetUpdatedAt(t time.Time) *FollowCreate { + fc.mutation.SetUpdatedAt(t) + return fc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. 
+func (fc *FollowCreate) SetNillableUpdatedAt(t *time.Time) *FollowCreate { + if t != nil { + fc.SetUpdatedAt(*t) + } + return fc +} + +// SetStatus sets the "status" field. +func (fc *FollowCreate) SetStatus(f follow.Status) *FollowCreate { + fc.mutation.SetStatus(f) + return fc +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (fc *FollowCreate) SetNillableStatus(f *follow.Status) *FollowCreate { + if f != nil { + fc.SetStatus(*f) + } + return fc +} + +// SetID sets the "id" field. +func (fc *FollowCreate) SetID(u uuid.UUID) *FollowCreate { + fc.mutation.SetID(u) + return fc +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (fc *FollowCreate) SetNillableID(u *uuid.UUID) *FollowCreate { + if u != nil { + fc.SetID(*u) + } + return fc +} + +// SetFollowerID sets the "follower" edge to the User entity by ID. +func (fc *FollowCreate) SetFollowerID(id uuid.UUID) *FollowCreate { + fc.mutation.SetFollowerID(id) + return fc +} + +// SetFollower sets the "follower" edge to the User entity. +func (fc *FollowCreate) SetFollower(u *User) *FollowCreate { + return fc.SetFollowerID(u.ID) +} + +// SetFolloweeID sets the "followee" edge to the User entity by ID. +func (fc *FollowCreate) SetFolloweeID(id uuid.UUID) *FollowCreate { + fc.mutation.SetFolloweeID(id) + return fc +} + +// SetFollowee sets the "followee" edge to the User entity. +func (fc *FollowCreate) SetFollowee(u *User) *FollowCreate { + return fc.SetFolloweeID(u.ID) +} + +// Mutation returns the FollowMutation object of the builder. +func (fc *FollowCreate) Mutation() *FollowMutation { + return fc.mutation +} + +// Save creates the Follow in the database. +func (fc *FollowCreate) Save(ctx context.Context) (*Follow, error) { + fc.defaults() + return withHooks(ctx, fc.sqlSave, fc.mutation, fc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. 
+func (fc *FollowCreate) SaveX(ctx context.Context) *Follow { + v, err := fc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (fc *FollowCreate) Exec(ctx context.Context) error { + _, err := fc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (fc *FollowCreate) ExecX(ctx context.Context) { + if err := fc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (fc *FollowCreate) defaults() { + if _, ok := fc.mutation.Extensions(); !ok { + v := follow.DefaultExtensions + fc.mutation.SetExtensions(v) + } + if _, ok := fc.mutation.CreatedAt(); !ok { + v := follow.DefaultCreatedAt() + fc.mutation.SetCreatedAt(v) + } + if _, ok := fc.mutation.UpdatedAt(); !ok { + v := follow.DefaultUpdatedAt() + fc.mutation.SetUpdatedAt(v) + } + if _, ok := fc.mutation.Status(); !ok { + v := follow.DefaultStatus + fc.mutation.SetStatus(v) + } + if _, ok := fc.mutation.ID(); !ok { + v := follow.DefaultID() + fc.mutation.SetID(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (fc *FollowCreate) check() error { + if _, ok := fc.mutation.IsRemote(); !ok { + return &ValidationError{Name: "isRemote", err: errors.New(`ent: missing required field "Follow.isRemote"`)} + } + if _, ok := fc.mutation.URI(); !ok { + return &ValidationError{Name: "uri", err: errors.New(`ent: missing required field "Follow.uri"`)} + } + if v, ok := fc.mutation.URI(); ok { + if err := follow.URIValidator(v); err != nil { + return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "Follow.uri": %w`, err)} + } + } + if _, ok := fc.mutation.Extensions(); !ok { + return &ValidationError{Name: "extensions", err: errors.New(`ent: missing required field "Follow.extensions"`)} + } + if _, ok := fc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Follow.created_at"`)} + } + if _, ok := fc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Follow.updated_at"`)} + } + if _, ok := fc.mutation.Status(); !ok { + return &ValidationError{Name: "status", err: errors.New(`ent: missing required field "Follow.status"`)} + } + if v, ok := fc.mutation.Status(); ok { + if err := follow.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Follow.status": %w`, err)} + } + } + if _, ok := fc.mutation.FollowerID(); !ok { + return &ValidationError{Name: "follower", err: errors.New(`ent: missing required edge "Follow.follower"`)} + } + if _, ok := fc.mutation.FolloweeID(); !ok { + return &ValidationError{Name: "followee", err: errors.New(`ent: missing required edge "Follow.followee"`)} + } + return nil +} + +func (fc *FollowCreate) sqlSave(ctx context.Context) (*Follow, error) { + if err := fc.check(); err != nil { + return nil, err + } + _node, _spec := fc.createSpec() + if err := sqlgraph.CreateNode(ctx, fc.driver, _spec); err != nil { + if 
sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + fc.mutation.id = &_node.ID + fc.mutation.done = true + return _node, nil +} + +func (fc *FollowCreate) createSpec() (*Follow, *sqlgraph.CreateSpec) { + var ( + _node = &Follow{config: fc.config} + _spec = sqlgraph.NewCreateSpec(follow.Table, sqlgraph.NewFieldSpec(follow.FieldID, field.TypeUUID)) + ) + _spec.OnConflict = fc.conflict + if id, ok := fc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = &id + } + if value, ok := fc.mutation.IsRemote(); ok { + _spec.SetField(follow.FieldIsRemote, field.TypeBool, value) + _node.IsRemote = value + } + if value, ok := fc.mutation.URI(); ok { + _spec.SetField(follow.FieldURI, field.TypeString, value) + _node.URI = value + } + if value, ok := fc.mutation.Extensions(); ok { + _spec.SetField(follow.FieldExtensions, field.TypeJSON, value) + _node.Extensions = value + } + if value, ok := fc.mutation.CreatedAt(); ok { + _spec.SetField(follow.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := fc.mutation.UpdatedAt(); ok { + _spec.SetField(follow.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := fc.mutation.Status(); ok { + _spec.SetField(follow.FieldStatus, field.TypeEnum, value) + _node.Status = value + } + if nodes := fc.mutation.FollowerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: follow.FollowerTable, + Columns: []string{follow.FollowerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.follow_follower = &nodes[0] + _spec.Edges = 
append(_spec.Edges, edge) + } + if nodes := fc.mutation.FolloweeIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: follow.FolloweeTable, + Columns: []string{follow.FolloweeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.follow_followee = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Follow.Create(). +// SetIsRemote(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.FollowUpsert) { +// SetIsRemote(v+v). +// }). +// Exec(ctx) +func (fc *FollowCreate) OnConflict(opts ...sql.ConflictOption) *FollowUpsertOne { + fc.conflict = opts + return &FollowUpsertOne{ + create: fc, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Follow.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (fc *FollowCreate) OnConflictColumns(columns ...string) *FollowUpsertOne { + fc.conflict = append(fc.conflict, sql.ConflictColumns(columns...)) + return &FollowUpsertOne{ + create: fc, + } +} + +type ( + // FollowUpsertOne is the builder for "upsert"-ing + // one Follow node. + FollowUpsertOne struct { + create *FollowCreate + } + + // FollowUpsert is the "OnConflict" setter. + FollowUpsert struct { + *sql.UpdateSet + } +) + +// SetIsRemote sets the "isRemote" field. 
+func (u *FollowUpsert) SetIsRemote(v bool) *FollowUpsert { + u.Set(follow.FieldIsRemote, v) + return u +} + +// UpdateIsRemote sets the "isRemote" field to the value that was provided on create. +func (u *FollowUpsert) UpdateIsRemote() *FollowUpsert { + u.SetExcluded(follow.FieldIsRemote) + return u +} + +// SetURI sets the "uri" field. +func (u *FollowUpsert) SetURI(v string) *FollowUpsert { + u.Set(follow.FieldURI, v) + return u +} + +// UpdateURI sets the "uri" field to the value that was provided on create. +func (u *FollowUpsert) UpdateURI() *FollowUpsert { + u.SetExcluded(follow.FieldURI) + return u +} + +// SetExtensions sets the "extensions" field. +func (u *FollowUpsert) SetExtensions(v lysand.Extensions) *FollowUpsert { + u.Set(follow.FieldExtensions, v) + return u +} + +// UpdateExtensions sets the "extensions" field to the value that was provided on create. +func (u *FollowUpsert) UpdateExtensions() *FollowUpsert { + u.SetExcluded(follow.FieldExtensions) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *FollowUpsert) SetUpdatedAt(v time.Time) *FollowUpsert { + u.Set(follow.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *FollowUpsert) UpdateUpdatedAt() *FollowUpsert { + u.SetExcluded(follow.FieldUpdatedAt) + return u +} + +// SetStatus sets the "status" field. +func (u *FollowUpsert) SetStatus(v follow.Status) *FollowUpsert { + u.Set(follow.FieldStatus, v) + return u +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *FollowUpsert) UpdateStatus() *FollowUpsert { + u.SetExcluded(follow.FieldStatus) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. +// Using this option is equivalent to using: +// +// client.Follow.Create(). 
+// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(follow.FieldID) +// }), +// ). +// Exec(ctx) +func (u *FollowUpsertOne) UpdateNewValues() *FollowUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.ID(); exists { + s.SetIgnore(follow.FieldID) + } + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(follow.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Follow.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *FollowUpsertOne) Ignore() *FollowUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *FollowUpsertOne) DoNothing() *FollowUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the FollowCreate.OnConflict +// documentation for more info. +func (u *FollowUpsertOne) Update(set func(*FollowUpsert)) *FollowUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&FollowUpsert{UpdateSet: update}) + })) + return u +} + +// SetIsRemote sets the "isRemote" field. +func (u *FollowUpsertOne) SetIsRemote(v bool) *FollowUpsertOne { + return u.Update(func(s *FollowUpsert) { + s.SetIsRemote(v) + }) +} + +// UpdateIsRemote sets the "isRemote" field to the value that was provided on create. +func (u *FollowUpsertOne) UpdateIsRemote() *FollowUpsertOne { + return u.Update(func(s *FollowUpsert) { + s.UpdateIsRemote() + }) +} + +// SetURI sets the "uri" field. 
+func (u *FollowUpsertOne) SetURI(v string) *FollowUpsertOne { + return u.Update(func(s *FollowUpsert) { + s.SetURI(v) + }) +} + +// UpdateURI sets the "uri" field to the value that was provided on create. +func (u *FollowUpsertOne) UpdateURI() *FollowUpsertOne { + return u.Update(func(s *FollowUpsert) { + s.UpdateURI() + }) +} + +// SetExtensions sets the "extensions" field. +func (u *FollowUpsertOne) SetExtensions(v lysand.Extensions) *FollowUpsertOne { + return u.Update(func(s *FollowUpsert) { + s.SetExtensions(v) + }) +} + +// UpdateExtensions sets the "extensions" field to the value that was provided on create. +func (u *FollowUpsertOne) UpdateExtensions() *FollowUpsertOne { + return u.Update(func(s *FollowUpsert) { + s.UpdateExtensions() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *FollowUpsertOne) SetUpdatedAt(v time.Time) *FollowUpsertOne { + return u.Update(func(s *FollowUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *FollowUpsertOne) UpdateUpdatedAt() *FollowUpsertOne { + return u.Update(func(s *FollowUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetStatus sets the "status" field. +func (u *FollowUpsertOne) SetStatus(v follow.Status) *FollowUpsertOne { + return u.Update(func(s *FollowUpsert) { + s.SetStatus(v) + }) +} + +// UpdateStatus sets the "status" field to the value that was provided on create. +func (u *FollowUpsertOne) UpdateStatus() *FollowUpsertOne { + return u.Update(func(s *FollowUpsert) { + s.UpdateStatus() + }) +} + +// Exec executes the query. +func (u *FollowUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for FollowCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *FollowUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *FollowUpsertOne) ID(ctx context.Context) (id uuid.UUID, err error) { + if u.create.driver.Dialect() == dialect.MySQL { + // In case of "ON CONFLICT", there is no way to get back non-numeric ID + // fields from the database since MySQL does not support the RETURNING clause. + return id, errors.New("ent: FollowUpsertOne.ID is not supported by MySQL driver. Use FollowUpsertOne.Exec instead") + } + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *FollowUpsertOne) IDX(ctx context.Context) uuid.UUID { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// FollowCreateBulk is the builder for creating many Follow entities in bulk. +type FollowCreateBulk struct { + config + err error + builders []*FollowCreate + conflict []sql.ConflictOption +} + +// Save creates the Follow entities in the database. 
+func (fcb *FollowCreateBulk) Save(ctx context.Context) ([]*Follow, error) { + if fcb.err != nil { + return nil, fcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(fcb.builders)) + nodes := make([]*Follow, len(fcb.builders)) + mutators := make([]Mutator, len(fcb.builders)) + for i := range fcb.builders { + func(i int, root context.Context) { + builder := fcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*FollowMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, fcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = fcb.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, fcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, fcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (fcb *FollowCreateBulk) SaveX(ctx context.Context) []*Follow { + v, err := fcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (fcb *FollowCreateBulk) Exec(ctx context.Context) error { + _, err := fcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
func (fcb *FollowCreateBulk) ExecX(ctx context.Context) {
	if err := fcb.Exec(ctx); err != nil {
		panic(err)
	}
}

// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause
// of the `INSERT` statement. For example:
//
//	client.Follow.CreateBulk(builders...).
//		OnConflict(
//			// Update the row with the new values
//			// that were proposed for insertion.
//			sql.ResolveWithNewValues(),
//		).
//		// Override some of the fields with custom
//		// update values.
//		Update(func(u *ent.FollowUpsert) {
//			SetIsRemote(v+v)
//		}).
//		Exec(ctx)
func (fcb *FollowCreateBulk) OnConflict(opts ...sql.ConflictOption) *FollowUpsertBulk {
	fcb.conflict = opts
	return &FollowUpsertBulk{
		create: fcb,
	}
}

// OnConflictColumns calls `OnConflict` and configures the columns
// as conflict target. Using this option is equivalent to using:
//
//	client.Follow.Create().
//		OnConflict(sql.ConflictColumns(columns...)).
//		Exec(ctx)
func (fcb *FollowCreateBulk) OnConflictColumns(columns ...string) *FollowUpsertBulk {
	fcb.conflict = append(fcb.conflict, sql.ConflictColumns(columns...))
	return &FollowUpsertBulk{
		create: fcb,
	}
}

// FollowUpsertBulk is the builder for "upsert"-ing
// a bulk of Follow nodes.
type FollowUpsertBulk struct {
	create *FollowCreateBulk
}

// UpdateNewValues updates the mutable fields using the new values that
// were set on create. Using this option is equivalent to using:
//
//	client.Follow.Create().
//		OnConflict(
//			sql.ResolveWithNewValues(),
//			sql.ResolveWith(func(u *sql.UpdateSet) {
//				u.SetIgnore(follow.FieldID)
//			}),
//		).
//	Exec(ctx)
func (u *FollowUpsertBulk) UpdateNewValues() *FollowUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues())
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) {
		// Immutable fields (id, created_at) must not be overwritten by the
		// conflict resolution when they were explicitly set on create.
		for _, b := range u.create.builders {
			if _, exists := b.mutation.ID(); exists {
				s.SetIgnore(follow.FieldID)
			}
			if _, exists := b.mutation.CreatedAt(); exists {
				s.SetIgnore(follow.FieldCreatedAt)
			}
		}
	}))
	return u
}

// Ignore sets each column to itself in case of conflict.
// Using this option is equivalent to using:
//
//	client.Follow.Create().
//		OnConflict(sql.ResolveWithIgnore()).
//		Exec(ctx)
func (u *FollowUpsertBulk) Ignore() *FollowUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore())
	return u
}

// DoNothing configures the conflict_action to `DO NOTHING`.
// Supported only by SQLite and PostgreSQL.
func (u *FollowUpsertBulk) DoNothing() *FollowUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.DoNothing())
	return u
}

// Update allows overriding fields `UPDATE` values. See the FollowCreateBulk.OnConflict
// documentation for more info.
func (u *FollowUpsertBulk) Update(set func(*FollowUpsert)) *FollowUpsertBulk {
	u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) {
		set(&FollowUpsert{UpdateSet: update})
	}))
	return u
}

// SetIsRemote sets the "isRemote" field.
func (u *FollowUpsertBulk) SetIsRemote(v bool) *FollowUpsertBulk {
	return u.Update(func(s *FollowUpsert) {
		s.SetIsRemote(v)
	})
}

// UpdateIsRemote sets the "isRemote" field to the value that was provided on create.
func (u *FollowUpsertBulk) UpdateIsRemote() *FollowUpsertBulk {
	return u.Update(func(s *FollowUpsert) {
		s.UpdateIsRemote()
	})
}

// SetURI sets the "uri" field.
func (u *FollowUpsertBulk) SetURI(v string) *FollowUpsertBulk {
	return u.Update(func(s *FollowUpsert) {
		s.SetURI(v)
	})
}

// UpdateURI sets the "uri" field to the value that was provided on create.
func (u *FollowUpsertBulk) UpdateURI() *FollowUpsertBulk {
	return u.Update(func(s *FollowUpsert) {
		s.UpdateURI()
	})
}

// SetExtensions sets the "extensions" field.
func (u *FollowUpsertBulk) SetExtensions(v lysand.Extensions) *FollowUpsertBulk {
	return u.Update(func(s *FollowUpsert) {
		s.SetExtensions(v)
	})
}

// UpdateExtensions sets the "extensions" field to the value that was provided on create.
func (u *FollowUpsertBulk) UpdateExtensions() *FollowUpsertBulk {
	return u.Update(func(s *FollowUpsert) {
		s.UpdateExtensions()
	})
}

// SetUpdatedAt sets the "updated_at" field.
func (u *FollowUpsertBulk) SetUpdatedAt(v time.Time) *FollowUpsertBulk {
	return u.Update(func(s *FollowUpsert) {
		s.SetUpdatedAt(v)
	})
}

// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create.
func (u *FollowUpsertBulk) UpdateUpdatedAt() *FollowUpsertBulk {
	return u.Update(func(s *FollowUpsert) {
		s.UpdateUpdatedAt()
	})
}

// SetStatus sets the "status" field.
func (u *FollowUpsertBulk) SetStatus(v follow.Status) *FollowUpsertBulk {
	return u.Update(func(s *FollowUpsert) {
		s.SetStatus(v)
	})
}

// UpdateStatus sets the "status" field to the value that was provided on create.
func (u *FollowUpsertBulk) UpdateStatus() *FollowUpsertBulk {
	return u.Update(func(s *FollowUpsert) {
		s.UpdateStatus()
	})
}

// Exec executes the query.
func (u *FollowUpsertBulk) Exec(ctx context.Context) error {
	if u.create.err != nil {
		return u.create.err
	}
	// OnConflict must be configured on the bulk builder, not on the
	// individual create builders it wraps.
	for i, b := range u.create.builders {
		if len(b.conflict) != 0 {
			return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the FollowCreateBulk instead", i)
		}
	}
	if len(u.create.conflict) == 0 {
		return errors.New("ent: missing options for FollowCreateBulk.OnConflict")
	}
	return u.create.Exec(ctx)
}

// ExecX is like Exec, but panics if an error occurs.
func (u *FollowUpsertBulk) ExecX(ctx context.Context) {
	if err := u.create.Exec(ctx); err != nil {
		panic(err)
	}
}
diff --git a/ent/follow_delete.go b/ent/follow_delete.go
new file mode 100644
index 0000000..d8e451d
--- /dev/null
+++ b/ent/follow_delete.go
@@ -0,0 +1,88 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
	"github.com/lysand-org/versia-go/ent/follow"
	"github.com/lysand-org/versia-go/ent/predicate"
)

// FollowDelete is the builder for deleting a Follow entity.
type FollowDelete struct {
	config
	hooks    []Hook
	mutation *FollowMutation
}

// Where appends a list predicates to the FollowDelete builder.
func (fd *FollowDelete) Where(ps ...predicate.Follow) *FollowDelete {
	fd.mutation.Where(ps...)
	return fd
}

// Exec executes the deletion query and returns how many vertices were deleted.
func (fd *FollowDelete) Exec(ctx context.Context) (int, error) {
	return withHooks(ctx, fd.sqlExec, fd.mutation, fd.hooks)
}

// ExecX is like Exec, but panics if an error occurs.
func (fd *FollowDelete) ExecX(ctx context.Context) int {
	n, err := fd.Exec(ctx)
	if err != nil {
		panic(err)
	}
	return n
}

// sqlExec builds and runs the DELETE statement for all rows matching the
// builder's predicates, returning the number of rows deleted.
func (fd *FollowDelete) sqlExec(ctx context.Context) (int, error) {
	_spec := sqlgraph.NewDeleteSpec(follow.Table, sqlgraph.NewFieldSpec(follow.FieldID, field.TypeUUID))
	if ps := fd.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	affected, err := sqlgraph.DeleteNodes(ctx, fd.driver, _spec)
	if err != nil && sqlgraph.IsConstraintError(err) {
		// Wrap driver-level constraint violations in the generated error type.
		err = &ConstraintError{msg: err.Error(), wrap: err}
	}
	fd.mutation.done = true
	return affected, err
}

// FollowDeleteOne is the builder for deleting a single Follow entity.
type FollowDeleteOne struct {
	fd *FollowDelete
}

// Where appends a list predicates to the FollowDelete builder.
func (fdo *FollowDeleteOne) Where(ps ...predicate.Follow) *FollowDeleteOne {
	fdo.fd.mutation.Where(ps...)
	return fdo
}

// Exec executes the deletion query.
func (fdo *FollowDeleteOne) Exec(ctx context.Context) error {
	n, err := fdo.fd.Exec(ctx)
	switch {
	case err != nil:
		return err
	case n == 0:
		// No rows matched the predicates: report the entity as not found.
		return &NotFoundError{follow.Label}
	default:
		return nil
	}
}

// ExecX is like Exec, but panics if an error occurs.
func (fdo *FollowDeleteOne) ExecX(ctx context.Context) {
	if err := fdo.Exec(ctx); err != nil {
		panic(err)
	}
}
diff --git a/ent/follow_query.go b/ent/follow_query.go
new file mode 100644
index 0000000..10c9f58
--- /dev/null
+++ b/ent/follow_query.go
@@ -0,0 +1,688 @@
// Code generated by ent, DO NOT EDIT.
package ent

import (
	"context"
	"fmt"
	"math"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/schema/field"
	"github.com/google/uuid"
	"github.com/lysand-org/versia-go/ent/follow"
	"github.com/lysand-org/versia-go/ent/predicate"
	"github.com/lysand-org/versia-go/ent/user"
)

// FollowQuery is the builder for querying Follow entities.
type FollowQuery struct {
	config
	ctx        *QueryContext
	order      []follow.OrderOption
	inters     []Interceptor
	predicates []predicate.Follow
	// Eager-loading configuration for the "follower"/"followee" edges.
	withFollower *UserQuery
	withFollowee *UserQuery
	withFKs      bool
	// intermediate query (i.e. traversal path).
	sql  *sql.Selector
	path func(context.Context) (*sql.Selector, error)
}

// Where adds a new predicate for the FollowQuery builder.
func (fq *FollowQuery) Where(ps ...predicate.Follow) *FollowQuery {
	fq.predicates = append(fq.predicates, ps...)
	return fq
}

// Limit the number of records to be returned by this query.
func (fq *FollowQuery) Limit(limit int) *FollowQuery {
	fq.ctx.Limit = &limit
	return fq
}

// Offset to start from.
func (fq *FollowQuery) Offset(offset int) *FollowQuery {
	fq.ctx.Offset = &offset
	return fq
}

// Unique configures the query builder to filter duplicate records on query.
// By default, unique is set to true, and can be disabled using this method.
func (fq *FollowQuery) Unique(unique bool) *FollowQuery {
	fq.ctx.Unique = &unique
	return fq
}

// Order specifies how the records should be ordered.
func (fq *FollowQuery) Order(o ...follow.OrderOption) *FollowQuery {
	fq.order = append(fq.order, o...)
	return fq
}

// QueryFollower chains the current query on the "follower" edge.
func (fq *FollowQuery) QueryFollower() *UserQuery {
	query := (&UserClient{config: fq.config}).Query()
	// The traversal path is resolved lazily, when the returned query executes.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := fq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := fq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(follow.Table, follow.FieldID, selector),
			sqlgraph.To(user.Table, user.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, follow.FollowerTable, follow.FollowerColumn),
		)
		fromU = sqlgraph.SetNeighbors(fq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}

// QueryFollowee chains the current query on the "followee" edge.
func (fq *FollowQuery) QueryFollowee() *UserQuery {
	query := (&UserClient{config: fq.config}).Query()
	// The traversal path is resolved lazily, when the returned query executes.
	query.path = func(ctx context.Context) (fromU *sql.Selector, err error) {
		if err := fq.prepareQuery(ctx); err != nil {
			return nil, err
		}
		selector := fq.sqlQuery(ctx)
		if err := selector.Err(); err != nil {
			return nil, err
		}
		step := sqlgraph.NewStep(
			sqlgraph.From(follow.Table, follow.FieldID, selector),
			sqlgraph.To(user.Table, user.FieldID),
			sqlgraph.Edge(sqlgraph.M2O, false, follow.FolloweeTable, follow.FolloweeColumn),
		)
		fromU = sqlgraph.SetNeighbors(fq.driver.Dialect(), step)
		return fromU, nil
	}
	return query
}

// First returns the first Follow entity from the query.
// Returns a *NotFoundError when no Follow was found.
func (fq *FollowQuery) First(ctx context.Context) (*Follow, error) {
	nodes, err := fq.Limit(1).All(setContextOp(ctx, fq.ctx, "First"))
	if err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nil, &NotFoundError{follow.Label}
	}
	return nodes[0], nil
}

// FirstX is like First, but panics if an error occurs.
+func (fq *FollowQuery) FirstX(ctx context.Context) *Follow { + node, err := fq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Follow ID from the query. +// Returns a *NotFoundError when no Follow ID was found. +func (fq *FollowQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = fq.Limit(1).IDs(setContextOp(ctx, fq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{follow.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (fq *FollowQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := fq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Follow entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Follow entity is found. +// Returns a *NotFoundError when no Follow entities are found. +func (fq *FollowQuery) Only(ctx context.Context) (*Follow, error) { + nodes, err := fq.Limit(2).All(setContextOp(ctx, fq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{follow.Label} + default: + return nil, &NotSingularError{follow.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (fq *FollowQuery) OnlyX(ctx context.Context) *Follow { + node, err := fq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Follow ID in the query. +// Returns a *NotSingularError when more than one Follow ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (fq *FollowQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = fq.Limit(2).IDs(setContextOp(ctx, fq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{follow.Label} + default: + err = &NotSingularError{follow.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (fq *FollowQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := fq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Follows. +func (fq *FollowQuery) All(ctx context.Context) ([]*Follow, error) { + ctx = setContextOp(ctx, fq.ctx, "All") + if err := fq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Follow, *FollowQuery]() + return withInterceptors[[]*Follow](ctx, fq, qr, fq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (fq *FollowQuery) AllX(ctx context.Context) []*Follow { + nodes, err := fq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Follow IDs. +func (fq *FollowQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if fq.ctx.Unique == nil && fq.path != nil { + fq.Unique(true) + } + ctx = setContextOp(ctx, fq.ctx, "IDs") + if err = fq.Select(follow.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (fq *FollowQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := fq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. 
+func (fq *FollowQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, fq.ctx, "Count") + if err := fq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, fq, querierCount[*FollowQuery](), fq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (fq *FollowQuery) CountX(ctx context.Context) int { + count, err := fq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (fq *FollowQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, fq.ctx, "Exist") + switch _, err := fq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (fq *FollowQuery) ExistX(ctx context.Context) bool { + exist, err := fq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the FollowQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (fq *FollowQuery) Clone() *FollowQuery { + if fq == nil { + return nil + } + return &FollowQuery{ + config: fq.config, + ctx: fq.ctx.Clone(), + order: append([]follow.OrderOption{}, fq.order...), + inters: append([]Interceptor{}, fq.inters...), + predicates: append([]predicate.Follow{}, fq.predicates...), + withFollower: fq.withFollower.Clone(), + withFollowee: fq.withFollowee.Clone(), + // clone intermediate query. + sql: fq.sql.Clone(), + path: fq.path, + } +} + +// WithFollower tells the query-builder to eager-load the nodes that are connected to +// the "follower" edge. The optional arguments are used to configure the query builder of the edge. 
+func (fq *FollowQuery) WithFollower(opts ...func(*UserQuery)) *FollowQuery { + query := (&UserClient{config: fq.config}).Query() + for _, opt := range opts { + opt(query) + } + fq.withFollower = query + return fq +} + +// WithFollowee tells the query-builder to eager-load the nodes that are connected to +// the "followee" edge. The optional arguments are used to configure the query builder of the edge. +func (fq *FollowQuery) WithFollowee(opts ...func(*UserQuery)) *FollowQuery { + query := (&UserClient{config: fq.config}).Query() + for _, opt := range opts { + opt(query) + } + fq.withFollowee = query + return fq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// IsRemote bool `json:"isRemote,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Follow.Query(). +// GroupBy(follow.FieldIsRemote). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (fq *FollowQuery) GroupBy(field string, fields ...string) *FollowGroupBy { + fq.ctx.Fields = append([]string{field}, fields...) + grbuild := &FollowGroupBy{build: fq} + grbuild.flds = &fq.ctx.Fields + grbuild.label = follow.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// IsRemote bool `json:"isRemote,omitempty"` +// } +// +// client.Follow.Query(). +// Select(follow.FieldIsRemote). +// Scan(ctx, &v) +func (fq *FollowQuery) Select(fields ...string) *FollowSelect { + fq.ctx.Fields = append(fq.ctx.Fields, fields...) + sbuild := &FollowSelect{FollowQuery: fq} + sbuild.label = follow.Label + sbuild.flds, sbuild.scan = &fq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a FollowSelect configured with the given aggregations. 
+func (fq *FollowQuery) Aggregate(fns ...AggregateFunc) *FollowSelect { + return fq.Select().Aggregate(fns...) +} + +func (fq *FollowQuery) prepareQuery(ctx context.Context) error { + for _, inter := range fq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, fq); err != nil { + return err + } + } + } + for _, f := range fq.ctx.Fields { + if !follow.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if fq.path != nil { + prev, err := fq.path(ctx) + if err != nil { + return err + } + fq.sql = prev + } + return nil +} + +func (fq *FollowQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Follow, error) { + var ( + nodes = []*Follow{} + withFKs = fq.withFKs + _spec = fq.querySpec() + loadedTypes = [2]bool{ + fq.withFollower != nil, + fq.withFollowee != nil, + } + ) + if fq.withFollower != nil || fq.withFollowee != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, follow.ForeignKeys...) 
+ } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Follow).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Follow{config: fq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, fq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := fq.withFollower; query != nil { + if err := fq.loadFollower(ctx, query, nodes, nil, + func(n *Follow, e *User) { n.Edges.Follower = e }); err != nil { + return nil, err + } + } + if query := fq.withFollowee; query != nil { + if err := fq.loadFollowee(ctx, query, nodes, nil, + func(n *Follow, e *User) { n.Edges.Followee = e }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (fq *FollowQuery) loadFollower(ctx context.Context, query *UserQuery, nodes []*Follow, init func(*Follow), assign func(*Follow, *User)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Follow) + for i := range nodes { + if nodes[i].follow_follower == nil { + continue + } + fk := *nodes[i].follow_follower + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "follow_follower" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (fq *FollowQuery) loadFollowee(ctx context.Context, query *UserQuery, nodes []*Follow, init func(*Follow), assign func(*Follow, *User)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Follow) + for i 
:= range nodes { + if nodes[i].follow_followee == nil { + continue + } + fk := *nodes[i].follow_followee + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "follow_followee" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} + +func (fq *FollowQuery) sqlCount(ctx context.Context) (int, error) { + _spec := fq.querySpec() + _spec.Node.Columns = fq.ctx.Fields + if len(fq.ctx.Fields) > 0 { + _spec.Unique = fq.ctx.Unique != nil && *fq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, fq.driver, _spec) +} + +func (fq *FollowQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(follow.Table, follow.Columns, sqlgraph.NewFieldSpec(follow.FieldID, field.TypeUUID)) + _spec.From = fq.sql + if unique := fq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if fq.path != nil { + _spec.Unique = true + } + if fields := fq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, follow.FieldID) + for i := range fields { + if fields[i] != follow.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := fq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := fq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := fq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := fq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (fq *FollowQuery) sqlQuery(ctx context.Context) 
*sql.Selector { + builder := sql.Dialect(fq.driver.Dialect()) + t1 := builder.Table(follow.Table) + columns := fq.ctx.Fields + if len(columns) == 0 { + columns = follow.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if fq.sql != nil { + selector = fq.sql + selector.Select(selector.Columns(columns...)...) + } + if fq.ctx.Unique != nil && *fq.ctx.Unique { + selector.Distinct() + } + for _, p := range fq.predicates { + p(selector) + } + for _, p := range fq.order { + p(selector) + } + if offset := fq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := fq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// FollowGroupBy is the group-by builder for Follow entities. +type FollowGroupBy struct { + selector + build *FollowQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (fgb *FollowGroupBy) Aggregate(fns ...AggregateFunc) *FollowGroupBy { + fgb.fns = append(fgb.fns, fns...) + return fgb +} + +// Scan applies the selector query and scans the result into the given value. 
+func (fgb *FollowGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, fgb.build.ctx, "GroupBy") + if err := fgb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*FollowQuery, *FollowGroupBy](ctx, fgb.build, fgb, fgb.build.inters, v) +} + +func (fgb *FollowGroupBy) sqlScan(ctx context.Context, root *FollowQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(fgb.fns)) + for _, fn := range fgb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*fgb.flds)+len(fgb.fns)) + for _, f := range *fgb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*fgb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := fgb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// FollowSelect is the builder for selecting fields of Follow entities. +type FollowSelect struct { + *FollowQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (fs *FollowSelect) Aggregate(fns ...AggregateFunc) *FollowSelect { + fs.fns = append(fs.fns, fns...) + return fs +} + +// Scan applies the selector query and scans the result into the given value. 
+func (fs *FollowSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, fs.ctx, "Select") + if err := fs.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*FollowQuery, *FollowSelect](ctx, fs.FollowQuery, fs, fs.inters, v) +} + +func (fs *FollowSelect) sqlScan(ctx context.Context, root *FollowQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(fs.fns)) + for _, fn := range fs.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*fs.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := fs.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/ent/follow_update.go b/ent/follow_update.go new file mode 100644 index 0000000..91fbe9c --- /dev/null +++ b/ent/follow_update.go @@ -0,0 +1,567 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/follow" + "github.com/lysand-org/versia-go/ent/predicate" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// FollowUpdate is the builder for updating Follow entities. +type FollowUpdate struct { + config + hooks []Hook + mutation *FollowMutation +} + +// Where appends a list predicates to the FollowUpdate builder. +func (fu *FollowUpdate) Where(ps ...predicate.Follow) *FollowUpdate { + fu.mutation.Where(ps...) + return fu +} + +// SetIsRemote sets the "isRemote" field. 
+func (fu *FollowUpdate) SetIsRemote(b bool) *FollowUpdate { + fu.mutation.SetIsRemote(b) + return fu +} + +// SetNillableIsRemote sets the "isRemote" field if the given value is not nil. +func (fu *FollowUpdate) SetNillableIsRemote(b *bool) *FollowUpdate { + if b != nil { + fu.SetIsRemote(*b) + } + return fu +} + +// SetURI sets the "uri" field. +func (fu *FollowUpdate) SetURI(s string) *FollowUpdate { + fu.mutation.SetURI(s) + return fu +} + +// SetNillableURI sets the "uri" field if the given value is not nil. +func (fu *FollowUpdate) SetNillableURI(s *string) *FollowUpdate { + if s != nil { + fu.SetURI(*s) + } + return fu +} + +// SetExtensions sets the "extensions" field. +func (fu *FollowUpdate) SetExtensions(l lysand.Extensions) *FollowUpdate { + fu.mutation.SetExtensions(l) + return fu +} + +// SetUpdatedAt sets the "updated_at" field. +func (fu *FollowUpdate) SetUpdatedAt(t time.Time) *FollowUpdate { + fu.mutation.SetUpdatedAt(t) + return fu +} + +// SetStatus sets the "status" field. +func (fu *FollowUpdate) SetStatus(f follow.Status) *FollowUpdate { + fu.mutation.SetStatus(f) + return fu +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (fu *FollowUpdate) SetNillableStatus(f *follow.Status) *FollowUpdate { + if f != nil { + fu.SetStatus(*f) + } + return fu +} + +// SetFollowerID sets the "follower" edge to the User entity by ID. +func (fu *FollowUpdate) SetFollowerID(id uuid.UUID) *FollowUpdate { + fu.mutation.SetFollowerID(id) + return fu +} + +// SetFollower sets the "follower" edge to the User entity. +func (fu *FollowUpdate) SetFollower(u *User) *FollowUpdate { + return fu.SetFollowerID(u.ID) +} + +// SetFolloweeID sets the "followee" edge to the User entity by ID. +func (fu *FollowUpdate) SetFolloweeID(id uuid.UUID) *FollowUpdate { + fu.mutation.SetFolloweeID(id) + return fu +} + +// SetFollowee sets the "followee" edge to the User entity. 
func (fu *FollowUpdate) SetFollowee(u *User) *FollowUpdate {
	return fu.SetFolloweeID(u.ID)
}

// Mutation returns the FollowMutation object of the builder.
func (fu *FollowUpdate) Mutation() *FollowMutation {
	return fu.mutation
}

// ClearFollower clears the "follower" edge to the User entity.
func (fu *FollowUpdate) ClearFollower() *FollowUpdate {
	fu.mutation.ClearFollower()
	return fu
}

// ClearFollowee clears the "followee" edge to the User entity.
func (fu *FollowUpdate) ClearFollowee() *FollowUpdate {
	fu.mutation.ClearFollowee()
	return fu
}

// Save executes the query and returns the number of nodes affected by the update operation.
func (fu *FollowUpdate) Save(ctx context.Context) (int, error) {
	// Populate unset defaulted fields (e.g. updated_at) before running hooks.
	fu.defaults()
	return withHooks(ctx, fu.sqlSave, fu.mutation, fu.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (fu *FollowUpdate) SaveX(ctx context.Context) int {
	affected, err := fu.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (fu *FollowUpdate) Exec(ctx context.Context) error {
	_, err := fu.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (fu *FollowUpdate) ExecX(ctx context.Context) {
	if err := fu.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
func (fu *FollowUpdate) defaults() {
	if _, ok := fu.mutation.UpdatedAt(); !ok {
		v := follow.UpdateDefaultUpdatedAt()
		fu.mutation.SetUpdatedAt(v)
	}
}

// check runs all checks and user-defined validators on the builder.
+func (fu *FollowUpdate) check() error { + if v, ok := fu.mutation.URI(); ok { + if err := follow.URIValidator(v); err != nil { + return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "Follow.uri": %w`, err)} + } + } + if v, ok := fu.mutation.Status(); ok { + if err := follow.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Follow.status": %w`, err)} + } + } + if _, ok := fu.mutation.FollowerID(); fu.mutation.FollowerCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Follow.follower"`) + } + if _, ok := fu.mutation.FolloweeID(); fu.mutation.FolloweeCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Follow.followee"`) + } + return nil +} + +func (fu *FollowUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := fu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(follow.Table, follow.Columns, sqlgraph.NewFieldSpec(follow.FieldID, field.TypeUUID)) + if ps := fu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := fu.mutation.IsRemote(); ok { + _spec.SetField(follow.FieldIsRemote, field.TypeBool, value) + } + if value, ok := fu.mutation.URI(); ok { + _spec.SetField(follow.FieldURI, field.TypeString, value) + } + if value, ok := fu.mutation.Extensions(); ok { + _spec.SetField(follow.FieldExtensions, field.TypeJSON, value) + } + if value, ok := fu.mutation.UpdatedAt(); ok { + _spec.SetField(follow.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := fu.mutation.Status(); ok { + _spec.SetField(follow.FieldStatus, field.TypeEnum, value) + } + if fu.mutation.FollowerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: follow.FollowerTable, + Columns: []string{follow.FollowerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ 
+ IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := fu.mutation.FollowerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: follow.FollowerTable, + Columns: []string{follow.FollowerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if fu.mutation.FolloweeCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: follow.FolloweeTable, + Columns: []string{follow.FolloweeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := fu.mutation.FolloweeIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: follow.FolloweeTable, + Columns: []string{follow.FolloweeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, fu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{follow.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + fu.mutation.done = true + return n, nil +} + +// FollowUpdateOne is the builder for updating a single Follow entity. +type FollowUpdateOne struct { + config + fields []string + hooks []Hook + mutation *FollowMutation +} + +// SetIsRemote sets the "isRemote" field. 
+func (fuo *FollowUpdateOne) SetIsRemote(b bool) *FollowUpdateOne { + fuo.mutation.SetIsRemote(b) + return fuo +} + +// SetNillableIsRemote sets the "isRemote" field if the given value is not nil. +func (fuo *FollowUpdateOne) SetNillableIsRemote(b *bool) *FollowUpdateOne { + if b != nil { + fuo.SetIsRemote(*b) + } + return fuo +} + +// SetURI sets the "uri" field. +func (fuo *FollowUpdateOne) SetURI(s string) *FollowUpdateOne { + fuo.mutation.SetURI(s) + return fuo +} + +// SetNillableURI sets the "uri" field if the given value is not nil. +func (fuo *FollowUpdateOne) SetNillableURI(s *string) *FollowUpdateOne { + if s != nil { + fuo.SetURI(*s) + } + return fuo +} + +// SetExtensions sets the "extensions" field. +func (fuo *FollowUpdateOne) SetExtensions(l lysand.Extensions) *FollowUpdateOne { + fuo.mutation.SetExtensions(l) + return fuo +} + +// SetUpdatedAt sets the "updated_at" field. +func (fuo *FollowUpdateOne) SetUpdatedAt(t time.Time) *FollowUpdateOne { + fuo.mutation.SetUpdatedAt(t) + return fuo +} + +// SetStatus sets the "status" field. +func (fuo *FollowUpdateOne) SetStatus(f follow.Status) *FollowUpdateOne { + fuo.mutation.SetStatus(f) + return fuo +} + +// SetNillableStatus sets the "status" field if the given value is not nil. +func (fuo *FollowUpdateOne) SetNillableStatus(f *follow.Status) *FollowUpdateOne { + if f != nil { + fuo.SetStatus(*f) + } + return fuo +} + +// SetFollowerID sets the "follower" edge to the User entity by ID. +func (fuo *FollowUpdateOne) SetFollowerID(id uuid.UUID) *FollowUpdateOne { + fuo.mutation.SetFollowerID(id) + return fuo +} + +// SetFollower sets the "follower" edge to the User entity. +func (fuo *FollowUpdateOne) SetFollower(u *User) *FollowUpdateOne { + return fuo.SetFollowerID(u.ID) +} + +// SetFolloweeID sets the "followee" edge to the User entity by ID. 
+func (fuo *FollowUpdateOne) SetFolloweeID(id uuid.UUID) *FollowUpdateOne { + fuo.mutation.SetFolloweeID(id) + return fuo +} + +// SetFollowee sets the "followee" edge to the User entity. +func (fuo *FollowUpdateOne) SetFollowee(u *User) *FollowUpdateOne { + return fuo.SetFolloweeID(u.ID) +} + +// Mutation returns the FollowMutation object of the builder. +func (fuo *FollowUpdateOne) Mutation() *FollowMutation { + return fuo.mutation +} + +// ClearFollower clears the "follower" edge to the User entity. +func (fuo *FollowUpdateOne) ClearFollower() *FollowUpdateOne { + fuo.mutation.ClearFollower() + return fuo +} + +// ClearFollowee clears the "followee" edge to the User entity. +func (fuo *FollowUpdateOne) ClearFollowee() *FollowUpdateOne { + fuo.mutation.ClearFollowee() + return fuo +} + +// Where appends a list predicates to the FollowUpdate builder. +func (fuo *FollowUpdateOne) Where(ps ...predicate.Follow) *FollowUpdateOne { + fuo.mutation.Where(ps...) + return fuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (fuo *FollowUpdateOne) Select(field string, fields ...string) *FollowUpdateOne { + fuo.fields = append([]string{field}, fields...) + return fuo +} + +// Save executes the query and returns the updated Follow entity. +func (fuo *FollowUpdateOne) Save(ctx context.Context) (*Follow, error) { + fuo.defaults() + return withHooks(ctx, fuo.sqlSave, fuo.mutation, fuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (fuo *FollowUpdateOne) SaveX(ctx context.Context) *Follow { + node, err := fuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (fuo *FollowUpdateOne) Exec(ctx context.Context) error { + _, err := fuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (fuo *FollowUpdateOne) ExecX(ctx context.Context) { + if err := fuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (fuo *FollowUpdateOne) defaults() { + if _, ok := fuo.mutation.UpdatedAt(); !ok { + v := follow.UpdateDefaultUpdatedAt() + fuo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (fuo *FollowUpdateOne) check() error { + if v, ok := fuo.mutation.URI(); ok { + if err := follow.URIValidator(v); err != nil { + return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "Follow.uri": %w`, err)} + } + } + if v, ok := fuo.mutation.Status(); ok { + if err := follow.StatusValidator(v); err != nil { + return &ValidationError{Name: "status", err: fmt.Errorf(`ent: validator failed for field "Follow.status": %w`, err)} + } + } + if _, ok := fuo.mutation.FollowerID(); fuo.mutation.FollowerCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Follow.follower"`) + } + if _, ok := fuo.mutation.FolloweeID(); fuo.mutation.FolloweeCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Follow.followee"`) + } + return nil +} + +func (fuo *FollowUpdateOne) sqlSave(ctx context.Context) (_node *Follow, err error) { + if err := fuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(follow.Table, follow.Columns, sqlgraph.NewFieldSpec(follow.FieldID, field.TypeUUID)) + id, ok := fuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Follow.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := fuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, follow.FieldID) + for _, f := range fields { + if !follow.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} 
+ } + if f != follow.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := fuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := fuo.mutation.IsRemote(); ok { + _spec.SetField(follow.FieldIsRemote, field.TypeBool, value) + } + if value, ok := fuo.mutation.URI(); ok { + _spec.SetField(follow.FieldURI, field.TypeString, value) + } + if value, ok := fuo.mutation.Extensions(); ok { + _spec.SetField(follow.FieldExtensions, field.TypeJSON, value) + } + if value, ok := fuo.mutation.UpdatedAt(); ok { + _spec.SetField(follow.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := fuo.mutation.Status(); ok { + _spec.SetField(follow.FieldStatus, field.TypeEnum, value) + } + if fuo.mutation.FollowerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: follow.FollowerTable, + Columns: []string{follow.FollowerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := fuo.mutation.FollowerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: follow.FollowerTable, + Columns: []string{follow.FollowerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if fuo.mutation.FolloweeCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: follow.FolloweeTable, + Columns: []string{follow.FolloweeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if 
nodes := fuo.mutation.FolloweeIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: follow.FolloweeTable, + Columns: []string{follow.FolloweeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Follow{config: fuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, fuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{follow.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + fuo.mutation.done = true + return _node, nil +} diff --git a/ent/generate.go b/ent/generate.go new file mode 100644 index 0000000..a76cba7 --- /dev/null +++ b/ent/generate.go @@ -0,0 +1,3 @@ +package ent + +//go:generate go run -mod=mod entgo.io/ent/cmd/ent generate --feature schema/snapshot --feature sql/upsert ./schema diff --git a/ent/hook/hook.go b/ent/hook/hook.go new file mode 100644 index 0000000..5acd047 --- /dev/null +++ b/ent/hook/hook.go @@ -0,0 +1,259 @@ +// Code generated by ent, DO NOT EDIT. + +package hook + +import ( + "context" + "fmt" + + "github.com/lysand-org/versia-go/ent" +) + +// The AttachmentFunc type is an adapter to allow the use of ordinary +// function as Attachment mutator. +type AttachmentFunc func(context.Context, *ent.AttachmentMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f AttachmentFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.AttachmentMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. 
expect *ent.AttachmentMutation", m) +} + +// The FollowFunc type is an adapter to allow the use of ordinary +// function as Follow mutator. +type FollowFunc func(context.Context, *ent.FollowMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f FollowFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.FollowMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.FollowMutation", m) +} + +// The ImageFunc type is an adapter to allow the use of ordinary +// function as Image mutator. +type ImageFunc func(context.Context, *ent.ImageMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f ImageFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.ImageMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ImageMutation", m) +} + +// The NoteFunc type is an adapter to allow the use of ordinary +// function as Note mutator. +type NoteFunc func(context.Context, *ent.NoteMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f NoteFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.NoteMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.NoteMutation", m) +} + +// The ServerMetadataFunc type is an adapter to allow the use of ordinary +// function as ServerMetadata mutator. +type ServerMetadataFunc func(context.Context, *ent.ServerMetadataMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f ServerMetadataFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.ServerMetadataMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.ServerMetadataMutation", m) +} + +// The UserFunc type is an adapter to allow the use of ordinary +// function as User mutator. 
+type UserFunc func(context.Context, *ent.UserMutation) (ent.Value, error) + +// Mutate calls f(ctx, m). +func (f UserFunc) Mutate(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if mv, ok := m.(*ent.UserMutation); ok { + return f(ctx, mv) + } + return nil, fmt.Errorf("unexpected mutation type %T. expect *ent.UserMutation", m) +} + +// Condition is a hook condition function. +type Condition func(context.Context, ent.Mutation) bool + +// And groups conditions with the AND operator. +func And(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if !first(ctx, m) || !second(ctx, m) { + return false + } + for _, cond := range rest { + if !cond(ctx, m) { + return false + } + } + return true + } +} + +// Or groups conditions with the OR operator. +func Or(first, second Condition, rest ...Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + if first(ctx, m) || second(ctx, m) { + return true + } + for _, cond := range rest { + if cond(ctx, m) { + return true + } + } + return false + } +} + +// Not negates a given condition. +func Not(cond Condition) Condition { + return func(ctx context.Context, m ent.Mutation) bool { + return !cond(ctx, m) + } +} + +// HasOp is a condition testing mutation operation. +func HasOp(op ent.Op) Condition { + return func(_ context.Context, m ent.Mutation) bool { + return m.Op().Is(op) + } +} + +// HasAddedFields is a condition validating `.AddedField` on fields. +func HasAddedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.AddedField(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.AddedField(field); !exists { + return false + } + } + return true + } +} + +// HasClearedFields is a condition validating `.FieldCleared` on fields. 
+func HasClearedFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if exists := m.FieldCleared(field); !exists { + return false + } + for _, field := range fields { + if exists := m.FieldCleared(field); !exists { + return false + } + } + return true + } +} + +// HasFields is a condition validating `.Field` on fields. +func HasFields(field string, fields ...string) Condition { + return func(_ context.Context, m ent.Mutation) bool { + if _, exists := m.Field(field); !exists { + return false + } + for _, field := range fields { + if _, exists := m.Field(field); !exists { + return false + } + } + return true + } +} + +// If executes the given hook under condition. +// +// hook.If(ComputeAverage, And(HasFields(...), HasAddedFields(...))) +func If(hk ent.Hook, cond Condition) ent.Hook { + return func(next ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(ctx context.Context, m ent.Mutation) (ent.Value, error) { + if cond(ctx, m) { + return hk(next).Mutate(ctx, m) + } + return next.Mutate(ctx, m) + }) + } +} + +// On executes the given hook only for the given operation. +// +// hook.On(Log, ent.Delete|ent.Create) +func On(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, HasOp(op)) +} + +// Unless skips the given hook only for the given operation. +// +// hook.Unless(Log, ent.Update|ent.UpdateOne) +func Unless(hk ent.Hook, op ent.Op) ent.Hook { + return If(hk, Not(HasOp(op))) +} + +// FixedError is a hook returning a fixed error. +func FixedError(err error) ent.Hook { + return func(ent.Mutator) ent.Mutator { + return ent.MutateFunc(func(context.Context, ent.Mutation) (ent.Value, error) { + return nil, err + }) + } +} + +// Reject returns a hook that rejects all operations that match op. 
+// +// func (T) Hooks() []ent.Hook { +// return []ent.Hook{ +// Reject(ent.Delete|ent.Update), +// } +// } +func Reject(op ent.Op) ent.Hook { + hk := FixedError(fmt.Errorf("%s operation is not allowed", op)) + return On(hk, op) +} + +// Chain acts as a list of hooks and is effectively immutable. +// Once created, it will always hold the same set of hooks in the same order. +type Chain struct { + hooks []ent.Hook +} + +// NewChain creates a new chain of hooks. +func NewChain(hooks ...ent.Hook) Chain { + return Chain{append([]ent.Hook(nil), hooks...)} +} + +// Hook chains the list of hooks and returns the final hook. +func (c Chain) Hook() ent.Hook { + return func(mutator ent.Mutator) ent.Mutator { + for i := len(c.hooks) - 1; i >= 0; i-- { + mutator = c.hooks[i](mutator) + } + return mutator + } +} + +// Append extends a chain, adding the specified hook +// as the last ones in the mutation flow. +func (c Chain) Append(hooks ...ent.Hook) Chain { + newHooks := make([]ent.Hook, 0, len(c.hooks)+len(hooks)) + newHooks = append(newHooks, c.hooks...) + newHooks = append(newHooks, hooks...) + return Chain{newHooks} +} + +// Extend extends a chain, adding the specified chain +// as the last ones in the mutation flow. +func (c Chain) Extend(chain Chain) Chain { + return c.Append(chain.hooks...) +} diff --git a/ent/image.go b/ent/image.go new file mode 100644 index 0000000..9efa102 --- /dev/null +++ b/ent/image.go @@ -0,0 +1,114 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "fmt" + "strings" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/lysand-org/versia-go/ent/image" +) + +// Image is the model entity for the Image schema. +type Image struct { + config `json:"-"` + // ID of the ent. + ID int `json:"id,omitempty"` + // URL holds the value of the "url" field. + URL string `json:"url,omitempty"` + // MimeType holds the value of the "mimeType" field. 
+ MimeType string `json:"mimeType,omitempty"` + selectValues sql.SelectValues +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*Image) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case image.FieldID: + values[i] = new(sql.NullInt64) + case image.FieldURL, image.FieldMimeType: + values[i] = new(sql.NullString) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Image fields. +func (i *Image) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for j := range columns { + switch columns[j] { + case image.FieldID: + value, ok := values[j].(*sql.NullInt64) + if !ok { + return fmt.Errorf("unexpected type %T for field id", value) + } + i.ID = int(value.Int64) + case image.FieldURL: + if value, ok := values[j].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field url", values[j]) + } else if value.Valid { + i.URL = value.String + } + case image.FieldMimeType: + if value, ok := values[j].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field mimeType", values[j]) + } else if value.Valid { + i.MimeType = value.String + } + default: + i.selectValues.Set(columns[j], values[j]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Image. +// This includes values selected through modifiers, order, etc. +func (i *Image) Value(name string) (ent.Value, error) { + return i.selectValues.Get(name) +} + +// Update returns a builder for updating this Image. +// Note that you need to call Image.Unwrap() before calling this method if this Image +// was returned from a transaction, and the transaction was committed or rolled back. 
+func (i *Image) Update() *ImageUpdateOne { + return NewImageClient(i.config).UpdateOne(i) +} + +// Unwrap unwraps the Image entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (i *Image) Unwrap() *Image { + _tx, ok := i.config.driver.(*txDriver) + if !ok { + panic("ent: Image is not a transactional entity") + } + i.config.driver = _tx.drv + return i +} + +// String implements the fmt.Stringer. +func (i *Image) String() string { + var builder strings.Builder + builder.WriteString("Image(") + builder.WriteString(fmt.Sprintf("id=%v, ", i.ID)) + builder.WriteString("url=") + builder.WriteString(i.URL) + builder.WriteString(", ") + builder.WriteString("mimeType=") + builder.WriteString(i.MimeType) + builder.WriteByte(')') + return builder.String() +} + +// Images is a parsable slice of Image. +type Images []*Image diff --git a/ent/image/image.go b/ent/image/image.go new file mode 100644 index 0000000..fa328aa --- /dev/null +++ b/ent/image/image.go @@ -0,0 +1,55 @@ +// Code generated by ent, DO NOT EDIT. + +package image + +import ( + "entgo.io/ent/dialect/sql" +) + +const ( + // Label holds the string label denoting the image type in the database. + Label = "image" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldURL holds the string denoting the url field in the database. + FieldURL = "url" + // FieldMimeType holds the string denoting the mimetype field in the database. + FieldMimeType = "mime_type" + // Table holds the table name of the image in the database. + Table = "images" +) + +// Columns holds all SQL columns for image fields. +var Columns = []string{ + FieldID, + FieldURL, + FieldMimeType, +} + +// ValidColumn reports if the column name is valid (part of the table columns). 
+func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + return false +} + +// OrderOption defines the ordering options for the Image queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByURL orders the results by the url field. +func ByURL(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldURL, opts...).ToFunc() +} + +// ByMimeType orders the results by the mimeType field. +func ByMimeType(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldMimeType, opts...).ToFunc() +} diff --git a/ent/image/where.go b/ent/image/where.go new file mode 100644 index 0000000..0dc9ea4 --- /dev/null +++ b/ent/image/where.go @@ -0,0 +1,208 @@ +// Code generated by ent, DO NOT EDIT. + +package image + +import ( + "entgo.io/ent/dialect/sql" + "github.com/lysand-org/versia-go/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id int) predicate.Image { + return predicate.Image(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id int) predicate.Image { + return predicate.Image(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. +func IDNEQ(id int) predicate.Image { + return predicate.Image(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...int) predicate.Image { + return predicate.Image(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...int) predicate.Image { + return predicate.Image(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id int) predicate.Image { + return predicate.Image(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. 
+func IDGTE(id int) predicate.Image { + return predicate.Image(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id int) predicate.Image { + return predicate.Image(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id int) predicate.Image { + return predicate.Image(sql.FieldLTE(FieldID, id)) +} + +// URL applies equality check predicate on the "url" field. It's identical to URLEQ. +func URL(v string) predicate.Image { + return predicate.Image(sql.FieldEQ(FieldURL, v)) +} + +// MimeType applies equality check predicate on the "mimeType" field. It's identical to MimeTypeEQ. +func MimeType(v string) predicate.Image { + return predicate.Image(sql.FieldEQ(FieldMimeType, v)) +} + +// URLEQ applies the EQ predicate on the "url" field. +func URLEQ(v string) predicate.Image { + return predicate.Image(sql.FieldEQ(FieldURL, v)) +} + +// URLNEQ applies the NEQ predicate on the "url" field. +func URLNEQ(v string) predicate.Image { + return predicate.Image(sql.FieldNEQ(FieldURL, v)) +} + +// URLIn applies the In predicate on the "url" field. +func URLIn(vs ...string) predicate.Image { + return predicate.Image(sql.FieldIn(FieldURL, vs...)) +} + +// URLNotIn applies the NotIn predicate on the "url" field. +func URLNotIn(vs ...string) predicate.Image { + return predicate.Image(sql.FieldNotIn(FieldURL, vs...)) +} + +// URLGT applies the GT predicate on the "url" field. +func URLGT(v string) predicate.Image { + return predicate.Image(sql.FieldGT(FieldURL, v)) +} + +// URLGTE applies the GTE predicate on the "url" field. +func URLGTE(v string) predicate.Image { + return predicate.Image(sql.FieldGTE(FieldURL, v)) +} + +// URLLT applies the LT predicate on the "url" field. +func URLLT(v string) predicate.Image { + return predicate.Image(sql.FieldLT(FieldURL, v)) +} + +// URLLTE applies the LTE predicate on the "url" field. 
+func URLLTE(v string) predicate.Image { + return predicate.Image(sql.FieldLTE(FieldURL, v)) +} + +// URLContains applies the Contains predicate on the "url" field. +func URLContains(v string) predicate.Image { + return predicate.Image(sql.FieldContains(FieldURL, v)) +} + +// URLHasPrefix applies the HasPrefix predicate on the "url" field. +func URLHasPrefix(v string) predicate.Image { + return predicate.Image(sql.FieldHasPrefix(FieldURL, v)) +} + +// URLHasSuffix applies the HasSuffix predicate on the "url" field. +func URLHasSuffix(v string) predicate.Image { + return predicate.Image(sql.FieldHasSuffix(FieldURL, v)) +} + +// URLEqualFold applies the EqualFold predicate on the "url" field. +func URLEqualFold(v string) predicate.Image { + return predicate.Image(sql.FieldEqualFold(FieldURL, v)) +} + +// URLContainsFold applies the ContainsFold predicate on the "url" field. +func URLContainsFold(v string) predicate.Image { + return predicate.Image(sql.FieldContainsFold(FieldURL, v)) +} + +// MimeTypeEQ applies the EQ predicate on the "mimeType" field. +func MimeTypeEQ(v string) predicate.Image { + return predicate.Image(sql.FieldEQ(FieldMimeType, v)) +} + +// MimeTypeNEQ applies the NEQ predicate on the "mimeType" field. +func MimeTypeNEQ(v string) predicate.Image { + return predicate.Image(sql.FieldNEQ(FieldMimeType, v)) +} + +// MimeTypeIn applies the In predicate on the "mimeType" field. +func MimeTypeIn(vs ...string) predicate.Image { + return predicate.Image(sql.FieldIn(FieldMimeType, vs...)) +} + +// MimeTypeNotIn applies the NotIn predicate on the "mimeType" field. +func MimeTypeNotIn(vs ...string) predicate.Image { + return predicate.Image(sql.FieldNotIn(FieldMimeType, vs...)) +} + +// MimeTypeGT applies the GT predicate on the "mimeType" field. +func MimeTypeGT(v string) predicate.Image { + return predicate.Image(sql.FieldGT(FieldMimeType, v)) +} + +// MimeTypeGTE applies the GTE predicate on the "mimeType" field. 
+func MimeTypeGTE(v string) predicate.Image { + return predicate.Image(sql.FieldGTE(FieldMimeType, v)) +} + +// MimeTypeLT applies the LT predicate on the "mimeType" field. +func MimeTypeLT(v string) predicate.Image { + return predicate.Image(sql.FieldLT(FieldMimeType, v)) +} + +// MimeTypeLTE applies the LTE predicate on the "mimeType" field. +func MimeTypeLTE(v string) predicate.Image { + return predicate.Image(sql.FieldLTE(FieldMimeType, v)) +} + +// MimeTypeContains applies the Contains predicate on the "mimeType" field. +func MimeTypeContains(v string) predicate.Image { + return predicate.Image(sql.FieldContains(FieldMimeType, v)) +} + +// MimeTypeHasPrefix applies the HasPrefix predicate on the "mimeType" field. +func MimeTypeHasPrefix(v string) predicate.Image { + return predicate.Image(sql.FieldHasPrefix(FieldMimeType, v)) +} + +// MimeTypeHasSuffix applies the HasSuffix predicate on the "mimeType" field. +func MimeTypeHasSuffix(v string) predicate.Image { + return predicate.Image(sql.FieldHasSuffix(FieldMimeType, v)) +} + +// MimeTypeEqualFold applies the EqualFold predicate on the "mimeType" field. +func MimeTypeEqualFold(v string) predicate.Image { + return predicate.Image(sql.FieldEqualFold(FieldMimeType, v)) +} + +// MimeTypeContainsFold applies the ContainsFold predicate on the "mimeType" field. +func MimeTypeContainsFold(v string) predicate.Image { + return predicate.Image(sql.FieldContainsFold(FieldMimeType, v)) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Image) predicate.Image { + return predicate.Image(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Image) predicate.Image { + return predicate.Image(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.Image) predicate.Image { + return predicate.Image(sql.NotPredicates(p)) +} diff --git a/ent/image_create.go b/ent/image_create.go new file mode 100644 index 0000000..729046e --- /dev/null +++ b/ent/image_create.go @@ -0,0 +1,507 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/lysand-org/versia-go/ent/image" +) + +// ImageCreate is the builder for creating a Image entity. +type ImageCreate struct { + config + mutation *ImageMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetURL sets the "url" field. +func (ic *ImageCreate) SetURL(s string) *ImageCreate { + ic.mutation.SetURL(s) + return ic +} + +// SetMimeType sets the "mimeType" field. +func (ic *ImageCreate) SetMimeType(s string) *ImageCreate { + ic.mutation.SetMimeType(s) + return ic +} + +// Mutation returns the ImageMutation object of the builder. +func (ic *ImageCreate) Mutation() *ImageMutation { + return ic.mutation +} + +// Save creates the Image in the database. +func (ic *ImageCreate) Save(ctx context.Context) (*Image, error) { + return withHooks(ctx, ic.sqlSave, ic.mutation, ic.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (ic *ImageCreate) SaveX(ctx context.Context) *Image { + v, err := ic.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ic *ImageCreate) Exec(ctx context.Context) error { + _, err := ic.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ic *ImageCreate) ExecX(ctx context.Context) { + if err := ic.Exec(ctx); err != nil { + panic(err) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (ic *ImageCreate) check() error { + if _, ok := ic.mutation.URL(); !ok { + return &ValidationError{Name: "url", err: errors.New(`ent: missing required field "Image.url"`)} + } + if _, ok := ic.mutation.MimeType(); !ok { + return &ValidationError{Name: "mimeType", err: errors.New(`ent: missing required field "Image.mimeType"`)} + } + return nil +} + +func (ic *ImageCreate) sqlSave(ctx context.Context) (*Image, error) { + if err := ic.check(); err != nil { + return nil, err + } + _node, _spec := ic.createSpec() + if err := sqlgraph.CreateNode(ctx, ic.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + id := _spec.ID.Value.(int64) + _node.ID = int(id) + ic.mutation.id = &_node.ID + ic.mutation.done = true + return _node, nil +} + +func (ic *ImageCreate) createSpec() (*Image, *sqlgraph.CreateSpec) { + var ( + _node = &Image{config: ic.config} + _spec = sqlgraph.NewCreateSpec(image.Table, sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt)) + ) + _spec.OnConflict = ic.conflict + if value, ok := ic.mutation.URL(); ok { + _spec.SetField(image.FieldURL, field.TypeString, value) + _node.URL = value + } + if value, ok := ic.mutation.MimeType(); ok { + _spec.SetField(image.FieldMimeType, field.TypeString, value) + _node.MimeType = value + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Image.Create(). +// SetURL(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.ImageUpsert) { +// SetURL(v+v). +// }). 
+// Exec(ctx) +func (ic *ImageCreate) OnConflict(opts ...sql.ConflictOption) *ImageUpsertOne { + ic.conflict = opts + return &ImageUpsertOne{ + create: ic, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Image.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (ic *ImageCreate) OnConflictColumns(columns ...string) *ImageUpsertOne { + ic.conflict = append(ic.conflict, sql.ConflictColumns(columns...)) + return &ImageUpsertOne{ + create: ic, + } +} + +type ( + // ImageUpsertOne is the builder for "upsert"-ing + // one Image node. + ImageUpsertOne struct { + create *ImageCreate + } + + // ImageUpsert is the "OnConflict" setter. + ImageUpsert struct { + *sql.UpdateSet + } +) + +// SetURL sets the "url" field. +func (u *ImageUpsert) SetURL(v string) *ImageUpsert { + u.Set(image.FieldURL, v) + return u +} + +// UpdateURL sets the "url" field to the value that was provided on create. +func (u *ImageUpsert) UpdateURL() *ImageUpsert { + u.SetExcluded(image.FieldURL) + return u +} + +// SetMimeType sets the "mimeType" field. +func (u *ImageUpsert) SetMimeType(v string) *ImageUpsert { + u.Set(image.FieldMimeType, v) + return u +} + +// UpdateMimeType sets the "mimeType" field to the value that was provided on create. +func (u *ImageUpsert) UpdateMimeType() *ImageUpsert { + u.SetExcluded(image.FieldMimeType) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create. +// Using this option is equivalent to using: +// +// client.Image.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *ImageUpsertOne) UpdateNewValues() *ImageUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Image.Create(). 
+// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *ImageUpsertOne) Ignore() *ImageUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *ImageUpsertOne) DoNothing() *ImageUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ImageCreate.OnConflict +// documentation for more info. +func (u *ImageUpsertOne) Update(set func(*ImageUpsert)) *ImageUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ImageUpsert{UpdateSet: update}) + })) + return u +} + +// SetURL sets the "url" field. +func (u *ImageUpsertOne) SetURL(v string) *ImageUpsertOne { + return u.Update(func(s *ImageUpsert) { + s.SetURL(v) + }) +} + +// UpdateURL sets the "url" field to the value that was provided on create. +func (u *ImageUpsertOne) UpdateURL() *ImageUpsertOne { + return u.Update(func(s *ImageUpsert) { + s.UpdateURL() + }) +} + +// SetMimeType sets the "mimeType" field. +func (u *ImageUpsertOne) SetMimeType(v string) *ImageUpsertOne { + return u.Update(func(s *ImageUpsert) { + s.SetMimeType(v) + }) +} + +// UpdateMimeType sets the "mimeType" field to the value that was provided on create. +func (u *ImageUpsertOne) UpdateMimeType() *ImageUpsertOne { + return u.Update(func(s *ImageUpsert) { + s.UpdateMimeType() + }) +} + +// Exec executes the query. +func (u *ImageUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for ImageCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (u *ImageUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *ImageUpsertOne) ID(ctx context.Context) (id int, err error) { + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *ImageUpsertOne) IDX(ctx context.Context) int { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// ImageCreateBulk is the builder for creating many Image entities in bulk. +type ImageCreateBulk struct { + config + err error + builders []*ImageCreate + conflict []sql.ConflictOption +} + +// Save creates the Image entities in the database. +func (icb *ImageCreateBulk) Save(ctx context.Context) ([]*Image, error) { + if icb.err != nil { + return nil, icb.err + } + specs := make([]*sqlgraph.CreateSpec, len(icb.builders)) + nodes := make([]*Image, len(icb.builders)) + mutators := make([]Mutator, len(icb.builders)) + for i := range icb.builders { + func(i int, root context.Context) { + builder := icb.builders[i] + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*ImageMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, icb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = icb.conflict + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, icb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + if specs[i].ID.Value != nil { + id := specs[i].ID.Value.(int64) + nodes[i].ID = int(id) + } + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, icb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (icb *ImageCreateBulk) SaveX(ctx context.Context) []*Image { + v, err := icb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (icb *ImageCreateBulk) Exec(ctx context.Context) error { + _, err := icb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (icb *ImageCreateBulk) ExecX(ctx context.Context) { + if err := icb.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Image.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.ImageUpsert) { +// SetURL(v+v). +// }). +// Exec(ctx) +func (icb *ImageCreateBulk) OnConflict(opts ...sql.ConflictOption) *ImageUpsertBulk { + icb.conflict = opts + return &ImageUpsertBulk{ + create: icb, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Image.Create(). +// OnConflict(sql.ConflictColumns(columns...)). 
+// Exec(ctx) +func (icb *ImageCreateBulk) OnConflictColumns(columns ...string) *ImageUpsertBulk { + icb.conflict = append(icb.conflict, sql.ConflictColumns(columns...)) + return &ImageUpsertBulk{ + create: icb, + } +} + +// ImageUpsertBulk is the builder for "upsert"-ing +// a bulk of Image nodes. +type ImageUpsertBulk struct { + create *ImageCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.Image.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// ). +// Exec(ctx) +func (u *ImageUpsertBulk) UpdateNewValues() *ImageUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Image.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *ImageUpsertBulk) Ignore() *ImageUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *ImageUpsertBulk) DoNothing() *ImageUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ImageCreateBulk.OnConflict +// documentation for more info. +func (u *ImageUpsertBulk) Update(set func(*ImageUpsert)) *ImageUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ImageUpsert{UpdateSet: update}) + })) + return u +} + +// SetURL sets the "url" field. +func (u *ImageUpsertBulk) SetURL(v string) *ImageUpsertBulk { + return u.Update(func(s *ImageUpsert) { + s.SetURL(v) + }) +} + +// UpdateURL sets the "url" field to the value that was provided on create. 
+func (u *ImageUpsertBulk) UpdateURL() *ImageUpsertBulk { + return u.Update(func(s *ImageUpsert) { + s.UpdateURL() + }) +} + +// SetMimeType sets the "mimeType" field. +func (u *ImageUpsertBulk) SetMimeType(v string) *ImageUpsertBulk { + return u.Update(func(s *ImageUpsert) { + s.SetMimeType(v) + }) +} + +// UpdateMimeType sets the "mimeType" field to the value that was provided on create. +func (u *ImageUpsertBulk) UpdateMimeType() *ImageUpsertBulk { + return u.Update(func(s *ImageUpsert) { + s.UpdateMimeType() + }) +} + +// Exec executes the query. +func (u *ImageUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the ImageCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for ImageCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *ImageUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/image_delete.go b/ent/image_delete.go new file mode 100644 index 0000000..c876e0a --- /dev/null +++ b/ent/image_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/lysand-org/versia-go/ent/image" + "github.com/lysand-org/versia-go/ent/predicate" +) + +// ImageDelete is the builder for deleting a Image entity. +type ImageDelete struct { + config + hooks []Hook + mutation *ImageMutation +} + +// Where appends a list predicates to the ImageDelete builder. +func (id *ImageDelete) Where(ps ...predicate.Image) *ImageDelete { + id.mutation.Where(ps...) 
+ return id +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (id *ImageDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, id.sqlExec, id.mutation, id.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (id *ImageDelete) ExecX(ctx context.Context) int { + n, err := id.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (id *ImageDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(image.Table, sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt)) + if ps := id.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, id.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + id.mutation.done = true + return affected, err +} + +// ImageDeleteOne is the builder for deleting a single Image entity. +type ImageDeleteOne struct { + id *ImageDelete +} + +// Where appends a list predicates to the ImageDelete builder. +func (ido *ImageDeleteOne) Where(ps ...predicate.Image) *ImageDeleteOne { + ido.id.mutation.Where(ps...) + return ido +} + +// Exec executes the deletion query. +func (ido *ImageDeleteOne) Exec(ctx context.Context) error { + n, err := ido.id.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{image.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ido *ImageDeleteOne) ExecX(ctx context.Context) { + if err := ido.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/image_query.go b/ent/image_query.go new file mode 100644 index 0000000..a6e4cd2 --- /dev/null +++ b/ent/image_query.go @@ -0,0 +1,526 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/lysand-org/versia-go/ent/image" + "github.com/lysand-org/versia-go/ent/predicate" +) + +// ImageQuery is the builder for querying Image entities. +type ImageQuery struct { + config + ctx *QueryContext + order []image.OrderOption + inters []Interceptor + predicates []predicate.Image + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the ImageQuery builder. +func (iq *ImageQuery) Where(ps ...predicate.Image) *ImageQuery { + iq.predicates = append(iq.predicates, ps...) + return iq +} + +// Limit the number of records to be returned by this query. +func (iq *ImageQuery) Limit(limit int) *ImageQuery { + iq.ctx.Limit = &limit + return iq +} + +// Offset to start from. +func (iq *ImageQuery) Offset(offset int) *ImageQuery { + iq.ctx.Offset = &offset + return iq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (iq *ImageQuery) Unique(unique bool) *ImageQuery { + iq.ctx.Unique = &unique + return iq +} + +// Order specifies how the records should be ordered. +func (iq *ImageQuery) Order(o ...image.OrderOption) *ImageQuery { + iq.order = append(iq.order, o...) + return iq +} + +// First returns the first Image entity from the query. +// Returns a *NotFoundError when no Image was found. +func (iq *ImageQuery) First(ctx context.Context) (*Image, error) { + nodes, err := iq.Limit(1).All(setContextOp(ctx, iq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{image.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (iq *ImageQuery) FirstX(ctx context.Context) *Image { + node, err := iq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Image ID from the query. +// Returns a *NotFoundError when no Image ID was found. +func (iq *ImageQuery) FirstID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = iq.Limit(1).IDs(setContextOp(ctx, iq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{image.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (iq *ImageQuery) FirstIDX(ctx context.Context) int { + id, err := iq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Image entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one Image entity is found. +// Returns a *NotFoundError when no Image entities are found. +func (iq *ImageQuery) Only(ctx context.Context) (*Image, error) { + nodes, err := iq.Limit(2).All(setContextOp(ctx, iq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{image.Label} + default: + return nil, &NotSingularError{image.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (iq *ImageQuery) OnlyX(ctx context.Context) *Image { + node, err := iq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Image ID in the query. +// Returns a *NotSingularError when more than one Image ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (iq *ImageQuery) OnlyID(ctx context.Context) (id int, err error) { + var ids []int + if ids, err = iq.Limit(2).IDs(setContextOp(ctx, iq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{image.Label} + default: + err = &NotSingularError{image.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (iq *ImageQuery) OnlyIDX(ctx context.Context) int { + id, err := iq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Images. +func (iq *ImageQuery) All(ctx context.Context) ([]*Image, error) { + ctx = setContextOp(ctx, iq.ctx, "All") + if err := iq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Image, *ImageQuery]() + return withInterceptors[[]*Image](ctx, iq, qr, iq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (iq *ImageQuery) AllX(ctx context.Context) []*Image { + nodes, err := iq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Image IDs. +func (iq *ImageQuery) IDs(ctx context.Context) (ids []int, err error) { + if iq.ctx.Unique == nil && iq.path != nil { + iq.Unique(true) + } + ctx = setContextOp(ctx, iq.ctx, "IDs") + if err = iq.Select(image.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (iq *ImageQuery) IDsX(ctx context.Context) []int { + ids, err := iq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (iq *ImageQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, iq.ctx, "Count") + if err := iq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, iq, querierCount[*ImageQuery](), iq.inters) +} + +// CountX is like Count, but panics if an error occurs. 
+func (iq *ImageQuery) CountX(ctx context.Context) int { + count, err := iq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (iq *ImageQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, iq.ctx, "Exist") + switch _, err := iq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (iq *ImageQuery) ExistX(ctx context.Context) bool { + exist, err := iq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the ImageQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (iq *ImageQuery) Clone() *ImageQuery { + if iq == nil { + return nil + } + return &ImageQuery{ + config: iq.config, + ctx: iq.ctx.Clone(), + order: append([]image.OrderOption{}, iq.order...), + inters: append([]Interceptor{}, iq.inters...), + predicates: append([]predicate.Image{}, iq.predicates...), + // clone intermediate query. + sql: iq.sql.Clone(), + path: iq.path, + } +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// URL string `json:"url,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Image.Query(). +// GroupBy(image.FieldURL). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (iq *ImageQuery) GroupBy(field string, fields ...string) *ImageGroupBy { + iq.ctx.Fields = append([]string{field}, fields...) 
+ grbuild := &ImageGroupBy{build: iq} + grbuild.flds = &iq.ctx.Fields + grbuild.label = image.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// URL string `json:"url,omitempty"` +// } +// +// client.Image.Query(). +// Select(image.FieldURL). +// Scan(ctx, &v) +func (iq *ImageQuery) Select(fields ...string) *ImageSelect { + iq.ctx.Fields = append(iq.ctx.Fields, fields...) + sbuild := &ImageSelect{ImageQuery: iq} + sbuild.label = image.Label + sbuild.flds, sbuild.scan = &iq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a ImageSelect configured with the given aggregations. +func (iq *ImageQuery) Aggregate(fns ...AggregateFunc) *ImageSelect { + return iq.Select().Aggregate(fns...) +} + +func (iq *ImageQuery) prepareQuery(ctx context.Context) error { + for _, inter := range iq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, iq); err != nil { + return err + } + } + } + for _, f := range iq.ctx.Fields { + if !image.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if iq.path != nil { + prev, err := iq.path(ctx) + if err != nil { + return err + } + iq.sql = prev + } + return nil +} + +func (iq *ImageQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Image, error) { + var ( + nodes = []*Image{} + _spec = iq.querySpec() + ) + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Image).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Image{config: iq.config} + nodes = append(nodes, node) + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if 
err := sqlgraph.QueryNodes(ctx, iq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + return nodes, nil +} + +func (iq *ImageQuery) sqlCount(ctx context.Context) (int, error) { + _spec := iq.querySpec() + _spec.Node.Columns = iq.ctx.Fields + if len(iq.ctx.Fields) > 0 { + _spec.Unique = iq.ctx.Unique != nil && *iq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, iq.driver, _spec) +} + +func (iq *ImageQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(image.Table, image.Columns, sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt)) + _spec.From = iq.sql + if unique := iq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if iq.path != nil { + _spec.Unique = true + } + if fields := iq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, image.FieldID) + for i := range fields { + if fields[i] != image.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := iq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := iq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := iq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := iq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (iq *ImageQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(iq.driver.Dialect()) + t1 := builder.Table(image.Table) + columns := iq.ctx.Fields + if len(columns) == 0 { + columns = image.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if iq.sql != nil { + selector = iq.sql + selector.Select(selector.Columns(columns...)...) 
+ } + if iq.ctx.Unique != nil && *iq.ctx.Unique { + selector.Distinct() + } + for _, p := range iq.predicates { + p(selector) + } + for _, p := range iq.order { + p(selector) + } + if offset := iq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := iq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// ImageGroupBy is the group-by builder for Image entities. +type ImageGroupBy struct { + selector + build *ImageQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (igb *ImageGroupBy) Aggregate(fns ...AggregateFunc) *ImageGroupBy { + igb.fns = append(igb.fns, fns...) + return igb +} + +// Scan applies the selector query and scans the result into the given value. +func (igb *ImageGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, igb.build.ctx, "GroupBy") + if err := igb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ImageQuery, *ImageGroupBy](ctx, igb.build, igb, igb.build.inters, v) +} + +func (igb *ImageGroupBy) sqlScan(ctx context.Context, root *ImageQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(igb.fns)) + for _, fn := range igb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*igb.flds)+len(igb.fns)) + for _, f := range *igb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*igb.flds...)...) 
+ if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := igb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// ImageSelect is the builder for selecting fields of Image entities. +type ImageSelect struct { + *ImageQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (is *ImageSelect) Aggregate(fns ...AggregateFunc) *ImageSelect { + is.fns = append(is.fns, fns...) + return is +} + +// Scan applies the selector query and scans the result into the given value. +func (is *ImageSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, is.ctx, "Select") + if err := is.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*ImageQuery, *ImageSelect](ctx, is.ImageQuery, is, is.inters, v) +} + +func (is *ImageSelect) sqlScan(ctx context.Context, root *ImageQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(is.fns)) + for _, fn := range is.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*is.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := is.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/ent/image_update.go b/ent/image_update.go new file mode 100644 index 0000000..85ed67d --- /dev/null +++ b/ent/image_update.go @@ -0,0 +1,243 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/lysand-org/versia-go/ent/image" + "github.com/lysand-org/versia-go/ent/predicate" +) + +// ImageUpdate is the builder for updating Image entities. +type ImageUpdate struct { + config + hooks []Hook + mutation *ImageMutation +} + +// Where appends a list predicates to the ImageUpdate builder. +func (iu *ImageUpdate) Where(ps ...predicate.Image) *ImageUpdate { + iu.mutation.Where(ps...) + return iu +} + +// SetURL sets the "url" field. +func (iu *ImageUpdate) SetURL(s string) *ImageUpdate { + iu.mutation.SetURL(s) + return iu +} + +// SetNillableURL sets the "url" field if the given value is not nil. +func (iu *ImageUpdate) SetNillableURL(s *string) *ImageUpdate { + if s != nil { + iu.SetURL(*s) + } + return iu +} + +// SetMimeType sets the "mimeType" field. +func (iu *ImageUpdate) SetMimeType(s string) *ImageUpdate { + iu.mutation.SetMimeType(s) + return iu +} + +// SetNillableMimeType sets the "mimeType" field if the given value is not nil. +func (iu *ImageUpdate) SetNillableMimeType(s *string) *ImageUpdate { + if s != nil { + iu.SetMimeType(*s) + } + return iu +} + +// Mutation returns the ImageMutation object of the builder. +func (iu *ImageUpdate) Mutation() *ImageMutation { + return iu.mutation +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (iu *ImageUpdate) Save(ctx context.Context) (int, error) { + return withHooks(ctx, iu.sqlSave, iu.mutation, iu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (iu *ImageUpdate) SaveX(ctx context.Context) int { + affected, err := iu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. 
+func (iu *ImageUpdate) Exec(ctx context.Context) error { + _, err := iu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (iu *ImageUpdate) ExecX(ctx context.Context) { + if err := iu.Exec(ctx); err != nil { + panic(err) + } +} + +func (iu *ImageUpdate) sqlSave(ctx context.Context) (n int, err error) { + _spec := sqlgraph.NewUpdateSpec(image.Table, image.Columns, sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt)) + if ps := iu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := iu.mutation.URL(); ok { + _spec.SetField(image.FieldURL, field.TypeString, value) + } + if value, ok := iu.mutation.MimeType(); ok { + _spec.SetField(image.FieldMimeType, field.TypeString, value) + } + if n, err = sqlgraph.UpdateNodes(ctx, iu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{image.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + iu.mutation.done = true + return n, nil +} + +// ImageUpdateOne is the builder for updating a single Image entity. +type ImageUpdateOne struct { + config + fields []string + hooks []Hook + mutation *ImageMutation +} + +// SetURL sets the "url" field. +func (iuo *ImageUpdateOne) SetURL(s string) *ImageUpdateOne { + iuo.mutation.SetURL(s) + return iuo +} + +// SetNillableURL sets the "url" field if the given value is not nil. +func (iuo *ImageUpdateOne) SetNillableURL(s *string) *ImageUpdateOne { + if s != nil { + iuo.SetURL(*s) + } + return iuo +} + +// SetMimeType sets the "mimeType" field. +func (iuo *ImageUpdateOne) SetMimeType(s string) *ImageUpdateOne { + iuo.mutation.SetMimeType(s) + return iuo +} + +// SetNillableMimeType sets the "mimeType" field if the given value is not nil. 
+func (iuo *ImageUpdateOne) SetNillableMimeType(s *string) *ImageUpdateOne { + if s != nil { + iuo.SetMimeType(*s) + } + return iuo +} + +// Mutation returns the ImageMutation object of the builder. +func (iuo *ImageUpdateOne) Mutation() *ImageMutation { + return iuo.mutation +} + +// Where appends a list predicates to the ImageUpdate builder. +func (iuo *ImageUpdateOne) Where(ps ...predicate.Image) *ImageUpdateOne { + iuo.mutation.Where(ps...) + return iuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (iuo *ImageUpdateOne) Select(field string, fields ...string) *ImageUpdateOne { + iuo.fields = append([]string{field}, fields...) + return iuo +} + +// Save executes the query and returns the updated Image entity. +func (iuo *ImageUpdateOne) Save(ctx context.Context) (*Image, error) { + return withHooks(ctx, iuo.sqlSave, iuo.mutation, iuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (iuo *ImageUpdateOne) SaveX(ctx context.Context) *Image { + node, err := iuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (iuo *ImageUpdateOne) Exec(ctx context.Context) error { + _, err := iuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (iuo *ImageUpdateOne) ExecX(ctx context.Context) { + if err := iuo.Exec(ctx); err != nil { + panic(err) + } +} + +func (iuo *ImageUpdateOne) sqlSave(ctx context.Context) (_node *Image, err error) { + _spec := sqlgraph.NewUpdateSpec(image.Table, image.Columns, sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt)) + id, ok := iuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Image.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := iuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, image.FieldID) + for _, f := range fields { + if !image.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != image.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := iuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := iuo.mutation.URL(); ok { + _spec.SetField(image.FieldURL, field.TypeString, value) + } + if value, ok := iuo.mutation.MimeType(); ok { + _spec.SetField(image.FieldMimeType, field.TypeString, value) + } + _node = &Image{config: iuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, iuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{image.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + iuo.mutation.done = true + return _node, nil +} diff --git a/ent/internal/schema.go b/ent/internal/schema.go new file mode 100644 index 0000000..3b19b62 --- /dev/null +++ b/ent/internal/schema.go @@ -0,0 +1,9 @@ +// Code generated by ent, DO NOT EDIT. 
+ +//go:build tools +// +build tools + +// Package internal holds a loadable version of the latest schema. +package internal + +const Schema = "{\"Schema\":\"github.com/lysand-org/versia-go/ent/schema\",\"Package\":\"github.com/lysand-org/versia-go/ent\",\"Schemas\":[{\"name\":\"Attachment\",\"config\":{\"Table\":\"\"},\"edges\":[{\"name\":\"author\",\"type\":\"User\",\"unique\":true,\"required\":true}],\"fields\":[{\"name\":\"id\",\"type\":{\"Type\":4,\"Ident\":\"uuid.UUID\",\"PkgPath\":\"github.com/google/uuid\",\"PkgName\":\"uuid\",\"Nillable\":false,\"RType\":{\"Name\":\"UUID\",\"Ident\":\"uuid.UUID\",\"Kind\":17,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":{\"ClockSequence\":{\"In\":[],\"Out\":[{\"Name\":\"int\",\"Ident\":\"int\",\"Kind\":2,\"PkgPath\":\"\",\"Methods\":null}]},\"Domain\":{\"In\":[],\"Out\":[{\"Name\":\"Domain\",\"Ident\":\"uuid.Domain\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"ID\":{\"In\":[],\"Out\":[{\"Name\":\"uint32\",\"Ident\":\"uint32\",\"Kind\":10,\"PkgPath\":\"\",\"Methods\":null}]},\"MarshalBinary\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"MarshalText\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"NodeID\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}]},\"Scan\":{\"In\":[{\"Name\":\"\",\"Ident\":\"interface 
{}\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"String\":{\"In\":[],\"Out\":[{\"Name\":\"string\",\"Ident\":\"string\",\"Kind\":24,\"PkgPath\":\"\",\"Methods\":null}]},\"Time\":{\"In\":[],\"Out\":[{\"Name\":\"Time\",\"Ident\":\"uuid.Time\",\"Kind\":6,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"URN\":{\"In\":[],\"Out\":[{\"Name\":\"string\",\"Ident\":\"string\",\"Kind\":24,\"PkgPath\":\"\",\"Methods\":null}]},\"UnmarshalBinary\":{\"In\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"UnmarshalText\":{\"In\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"Value\":{\"In\":[],\"Out\":[{\"Name\":\"Value\",\"Ident\":\"driver.Value\",\"Kind\":20,\"PkgPath\":\"database/sql/driver\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"Variant\":{\"In\":[],\"Out\":[{\"Name\":\"Variant\",\"Ident\":\"uuid.Variant\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"Version\":{\"In\":[],\"Out\":[{\"Name\":\"Version\",\"Ident\":\"uuid.Version\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]}}}},\"default\":true,\"default_kind\":19,\"immutable\":true,\"position\":{\"Index\":0,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"isRemote\",\"type\":{\"Type\":1,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"position\":{\"Index\":1,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"uri\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":2,\"MixedIn\":true,\"MixinIndex\":0
}},{\"name\":\"extensions\",\"type\":{\"Type\":3,\"Ident\":\"lysand.Extensions\",\"PkgPath\":\"github.com/lysand-org/versia-go/pkg/lysand\",\"PkgName\":\"lysand\",\"Nillable\":true,\"RType\":{\"Name\":\"Extensions\",\"Ident\":\"lysand.Extensions\",\"Kind\":21,\"PkgPath\":\"github.com/lysand-org/versia-go/pkg/lysand\",\"Methods\":{}}},\"default\":true,\"default_value\":{},\"default_kind\":21,\"position\":{\"Index\":3,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"created_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"default\":true,\"default_kind\":19,\"immutable\":true,\"position\":{\"Index\":4,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"updated_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"default\":true,\"default_kind\":19,\"update_default\":true,\"position\":{\"Index\":5,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"description\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"size\":384,\"validators\":1,\"position\":{\"Index\":0,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"sha256\",\"type\":{\"Type\":5,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":true,\"RType\":null},\"position\":{\"Index\":1,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"size\",\"type\":{\"Type\":12,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"position\":{\"Index\":2,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"blurhash\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"nillable\":true,\"optional\":true,\"position\":{\"Index\":3,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"height\",\"type\":{\"Type\":12,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"nillable\":true,\"optional\":true,\"position\":{\"Index\":4,\"MixedIn\":false,\
"MixinIndex\":0}},{\"name\":\"width\",\"type\":{\"Type\":12,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"nillable\":true,\"optional\":true,\"position\":{\"Index\":5,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"fps\",\"type\":{\"Type\":12,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"nillable\":true,\"optional\":true,\"position\":{\"Index\":6,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"mimeType\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"position\":{\"Index\":7,\"MixedIn\":false,\"MixinIndex\":0}}]},{\"name\":\"Follow\",\"config\":{\"Table\":\"\"},\"edges\":[{\"name\":\"follower\",\"type\":\"User\",\"unique\":true,\"required\":true},{\"name\":\"followee\",\"type\":\"User\",\"unique\":true,\"required\":true}],\"fields\":[{\"name\":\"id\",\"type\":{\"Type\":4,\"Ident\":\"uuid.UUID\",\"PkgPath\":\"github.com/google/uuid\",\"PkgName\":\"uuid\",\"Nillable\":false,\"RType\":{\"Name\":\"UUID\",\"Ident\":\"uuid.UUID\",\"Kind\":17,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":{\"ClockSequence\":{\"In\":[],\"Out\":[{\"Name\":\"int\",\"Ident\":\"int\",\"Kind\":2,\"PkgPath\":\"\",\"Methods\":null}]},\"Domain\":{\"In\":[],\"Out\":[{\"Name\":\"Domain\",\"Ident\":\"uuid.Domain\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"ID\":{\"In\":[],\"Out\":[{\"Name\":\"uint32\",\"Ident\":\"uint32\",\"Kind\":10,\"PkgPath\":\"\",\"Methods\":null}]},\"MarshalBinary\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"MarshalText\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"NodeID\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Id
ent\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}]},\"Scan\":{\"In\":[{\"Name\":\"\",\"Ident\":\"interface {}\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"String\":{\"In\":[],\"Out\":[{\"Name\":\"string\",\"Ident\":\"string\",\"Kind\":24,\"PkgPath\":\"\",\"Methods\":null}]},\"Time\":{\"In\":[],\"Out\":[{\"Name\":\"Time\",\"Ident\":\"uuid.Time\",\"Kind\":6,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"URN\":{\"In\":[],\"Out\":[{\"Name\":\"string\",\"Ident\":\"string\",\"Kind\":24,\"PkgPath\":\"\",\"Methods\":null}]},\"UnmarshalBinary\":{\"In\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"UnmarshalText\":{\"In\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"Value\":{\"In\":[],\"Out\":[{\"Name\":\"Value\",\"Ident\":\"driver.Value\",\"Kind\":20,\"PkgPath\":\"database/sql/driver\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"Variant\":{\"In\":[],\"Out\":[{\"Name\":\"Variant\",\"Ident\":\"uuid.Variant\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"Version\":{\"In\":[],\"Out\":[{\"Name\":\"Version\",\"Ident\":\"uuid.Version\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]}}}},\"default\":true,\"default_kind\":19,\"immutable\":true,\"position\":{\"Index\":0,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"isRemote\",\"type\":{\"Type\":1,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"position\":{\"Index\":1,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"uri\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgN
ame\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":2,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"extensions\",\"type\":{\"Type\":3,\"Ident\":\"lysand.Extensions\",\"PkgPath\":\"github.com/lysand-org/versia-go/pkg/lysand\",\"PkgName\":\"lysand\",\"Nillable\":true,\"RType\":{\"Name\":\"Extensions\",\"Ident\":\"lysand.Extensions\",\"Kind\":21,\"PkgPath\":\"github.com/lysand-org/versia-go/pkg/lysand\",\"Methods\":{}}},\"default\":true,\"default_value\":{},\"default_kind\":21,\"position\":{\"Index\":3,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"created_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"default\":true,\"default_kind\":19,\"immutable\":true,\"position\":{\"Index\":4,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"updated_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"default\":true,\"default_kind\":19,\"update_default\":true,\"position\":{\"Index\":5,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"status\",\"type\":{\"Type\":6,\"Ident\":\"follow.Status\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"enums\":[{\"N\":\"pending\",\"V\":\"pending\"},{\"N\":\"accepted\",\"V\":\"accepted\"}],\"default\":true,\"default_value\":\"pending\",\"default_kind\":24,\"position\":{\"Index\":0,\"MixedIn\":false,\"MixinIndex\":0}}],\"indexes\":[{\"unique\":true,\"edges\":[\"follower\",\"followee\"]}]},{\"name\":\"Image\",\"config\":{\"Table\":\"\"},\"fields\":[{\"name\":\"url\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"position\":{\"Index\":0,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"mimeType\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"position\":{\"Index\":1,\"MixedIn\":false,\"MixinIndex\":0}}]},{\"name\":\"Note\",\"config\":{\"Table\":\"
\"},\"edges\":[{\"name\":\"author\",\"type\":\"User\",\"unique\":true,\"required\":true},{\"name\":\"mentions\",\"type\":\"User\"},{\"name\":\"attachments\",\"type\":\"Attachment\"}],\"fields\":[{\"name\":\"id\",\"type\":{\"Type\":4,\"Ident\":\"uuid.UUID\",\"PkgPath\":\"github.com/google/uuid\",\"PkgName\":\"uuid\",\"Nillable\":false,\"RType\":{\"Name\":\"UUID\",\"Ident\":\"uuid.UUID\",\"Kind\":17,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":{\"ClockSequence\":{\"In\":[],\"Out\":[{\"Name\":\"int\",\"Ident\":\"int\",\"Kind\":2,\"PkgPath\":\"\",\"Methods\":null}]},\"Domain\":{\"In\":[],\"Out\":[{\"Name\":\"Domain\",\"Ident\":\"uuid.Domain\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"ID\":{\"In\":[],\"Out\":[{\"Name\":\"uint32\",\"Ident\":\"uint32\",\"Kind\":10,\"PkgPath\":\"\",\"Methods\":null}]},\"MarshalBinary\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"MarshalText\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"NodeID\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}]},\"Scan\":{\"In\":[{\"Name\":\"\",\"Ident\":\"interface 
{}\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"String\":{\"In\":[],\"Out\":[{\"Name\":\"string\",\"Ident\":\"string\",\"Kind\":24,\"PkgPath\":\"\",\"Methods\":null}]},\"Time\":{\"In\":[],\"Out\":[{\"Name\":\"Time\",\"Ident\":\"uuid.Time\",\"Kind\":6,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"URN\":{\"In\":[],\"Out\":[{\"Name\":\"string\",\"Ident\":\"string\",\"Kind\":24,\"PkgPath\":\"\",\"Methods\":null}]},\"UnmarshalBinary\":{\"In\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"UnmarshalText\":{\"In\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"Value\":{\"In\":[],\"Out\":[{\"Name\":\"Value\",\"Ident\":\"driver.Value\",\"Kind\":20,\"PkgPath\":\"database/sql/driver\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"Variant\":{\"In\":[],\"Out\":[{\"Name\":\"Variant\",\"Ident\":\"uuid.Variant\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"Version\":{\"In\":[],\"Out\":[{\"Name\":\"Version\",\"Ident\":\"uuid.Version\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]}}}},\"default\":true,\"default_kind\":19,\"immutable\":true,\"position\":{\"Index\":0,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"isRemote\",\"type\":{\"Type\":1,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"position\":{\"Index\":1,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"uri\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":2,\"MixedIn\":true,\"MixinIndex\":0
}},{\"name\":\"extensions\",\"type\":{\"Type\":3,\"Ident\":\"lysand.Extensions\",\"PkgPath\":\"github.com/lysand-org/versia-go/pkg/lysand\",\"PkgName\":\"lysand\",\"Nillable\":true,\"RType\":{\"Name\":\"Extensions\",\"Ident\":\"lysand.Extensions\",\"Kind\":21,\"PkgPath\":\"github.com/lysand-org/versia-go/pkg/lysand\",\"Methods\":{}}},\"default\":true,\"default_value\":{},\"default_kind\":21,\"position\":{\"Index\":3,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"created_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"default\":true,\"default_kind\":19,\"immutable\":true,\"position\":{\"Index\":4,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"updated_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"default\":true,\"default_kind\":19,\"update_default\":true,\"position\":{\"Index\":5,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"subject\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"size\":384,\"nillable\":true,\"optional\":true,\"validators\":1,\"position\":{\"Index\":0,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"content\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"position\":{\"Index\":1,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"isSensitive\",\"type\":{\"Type\":1,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"default\":true,\"default_value\":false,\"default_kind\":1,\"position\":{\"Index\":2,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"visibility\",\"type\":{\"Type\":6,\"Ident\":\"note.Visibility\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"enums\":[{\"N\":\"public\",\"V\":\"public\"},{\"N\":\"unlisted\",\"V\":\"unlisted\"},{\"N\":\"followers\",\"V\":\"followers\"},{\"N\":\"direct\",\"V\":\"direct\"}],\"default\":true,\"defau
lt_value\":\"public\",\"default_kind\":24,\"position\":{\"Index\":3,\"MixedIn\":false,\"MixinIndex\":0}}]},{\"name\":\"ServerMetadata\",\"config\":{\"Table\":\"\"},\"edges\":[{\"name\":\"follower\",\"type\":\"User\",\"unique\":true,\"required\":true},{\"name\":\"followee\",\"type\":\"User\",\"unique\":true,\"required\":true}],\"fields\":[{\"name\":\"id\",\"type\":{\"Type\":4,\"Ident\":\"uuid.UUID\",\"PkgPath\":\"github.com/google/uuid\",\"PkgName\":\"uuid\",\"Nillable\":false,\"RType\":{\"Name\":\"UUID\",\"Ident\":\"uuid.UUID\",\"Kind\":17,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":{\"ClockSequence\":{\"In\":[],\"Out\":[{\"Name\":\"int\",\"Ident\":\"int\",\"Kind\":2,\"PkgPath\":\"\",\"Methods\":null}]},\"Domain\":{\"In\":[],\"Out\":[{\"Name\":\"Domain\",\"Ident\":\"uuid.Domain\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"ID\":{\"In\":[],\"Out\":[{\"Name\":\"uint32\",\"Ident\":\"uint32\",\"Kind\":10,\"PkgPath\":\"\",\"Methods\":null}]},\"MarshalBinary\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"MarshalText\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"NodeID\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}]},\"Scan\":{\"In\":[{\"Name\":\"\",\"Ident\":\"interface 
{}\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"String\":{\"In\":[],\"Out\":[{\"Name\":\"string\",\"Ident\":\"string\",\"Kind\":24,\"PkgPath\":\"\",\"Methods\":null}]},\"Time\":{\"In\":[],\"Out\":[{\"Name\":\"Time\",\"Ident\":\"uuid.Time\",\"Kind\":6,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"URN\":{\"In\":[],\"Out\":[{\"Name\":\"string\",\"Ident\":\"string\",\"Kind\":24,\"PkgPath\":\"\",\"Methods\":null}]},\"UnmarshalBinary\":{\"In\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"UnmarshalText\":{\"In\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"Value\":{\"In\":[],\"Out\":[{\"Name\":\"Value\",\"Ident\":\"driver.Value\",\"Kind\":20,\"PkgPath\":\"database/sql/driver\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"Variant\":{\"In\":[],\"Out\":[{\"Name\":\"Variant\",\"Ident\":\"uuid.Variant\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"Version\":{\"In\":[],\"Out\":[{\"Name\":\"Version\",\"Ident\":\"uuid.Version\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]}}}},\"default\":true,\"default_kind\":19,\"immutable\":true,\"position\":{\"Index\":0,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"isRemote\",\"type\":{\"Type\":1,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"position\":{\"Index\":1,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"uri\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":2,\"MixedIn\":true,\"MixinIndex\":0
}},{\"name\":\"extensions\",\"type\":{\"Type\":3,\"Ident\":\"lysand.Extensions\",\"PkgPath\":\"github.com/lysand-org/versia-go/pkg/lysand\",\"PkgName\":\"lysand\",\"Nillable\":true,\"RType\":{\"Name\":\"Extensions\",\"Ident\":\"lysand.Extensions\",\"Kind\":21,\"PkgPath\":\"github.com/lysand-org/versia-go/pkg/lysand\",\"Methods\":{}}},\"default\":true,\"default_value\":{},\"default_kind\":21,\"position\":{\"Index\":3,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"created_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"default\":true,\"default_kind\":19,\"immutable\":true,\"position\":{\"Index\":4,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"updated_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"default\":true,\"default_kind\":19,\"update_default\":true,\"position\":{\"Index\":5,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"name\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":0,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"description\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"nillable\":true,\"optional\":true,\"position\":{\"Index\":1,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"version\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":2,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"supportedExtensions\",\"type\":{\"Type\":3,\"Ident\":\"[]string\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":true,\"RType\":{\"Name\":\"\",\"Ident\":\"[]string\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":{}}},\"default\":true,\"default_value\":[],\"default_kind\":23,\"position\":{\"Index\":3,\"MixedIn\":false,\"MixinIndex\":0}}],\"indexes\":[{\"unique\":true,\"edges\":[\"follo
wer\",\"followee\"]}]},{\"name\":\"User\",\"config\":{\"Table\":\"\"},\"edges\":[{\"name\":\"avatarImage\",\"type\":\"Image\",\"unique\":true},{\"name\":\"headerImage\",\"type\":\"Image\",\"unique\":true},{\"name\":\"authoredNotes\",\"type\":\"Note\",\"ref_name\":\"author\",\"inverse\":true},{\"name\":\"mentionedNotes\",\"type\":\"Note\",\"ref_name\":\"mentions\",\"inverse\":true}],\"fields\":[{\"name\":\"id\",\"type\":{\"Type\":4,\"Ident\":\"uuid.UUID\",\"PkgPath\":\"github.com/google/uuid\",\"PkgName\":\"uuid\",\"Nillable\":false,\"RType\":{\"Name\":\"UUID\",\"Ident\":\"uuid.UUID\",\"Kind\":17,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":{\"ClockSequence\":{\"In\":[],\"Out\":[{\"Name\":\"int\",\"Ident\":\"int\",\"Kind\":2,\"PkgPath\":\"\",\"Methods\":null}]},\"Domain\":{\"In\":[],\"Out\":[{\"Name\":\"Domain\",\"Ident\":\"uuid.Domain\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"ID\":{\"In\":[],\"Out\":[{\"Name\":\"uint32\",\"Ident\":\"uint32\",\"Kind\":10,\"PkgPath\":\"\",\"Methods\":null}]},\"MarshalBinary\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"MarshalText\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"NodeID\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}]},\"Scan\":{\"In\":[{\"Name\":\"\",\"Ident\":\"interface 
{}\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"String\":{\"In\":[],\"Out\":[{\"Name\":\"string\",\"Ident\":\"string\",\"Kind\":24,\"PkgPath\":\"\",\"Methods\":null}]},\"Time\":{\"In\":[],\"Out\":[{\"Name\":\"Time\",\"Ident\":\"uuid.Time\",\"Kind\":6,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"URN\":{\"In\":[],\"Out\":[{\"Name\":\"string\",\"Ident\":\"string\",\"Kind\":24,\"PkgPath\":\"\",\"Methods\":null}]},\"UnmarshalBinary\":{\"In\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"UnmarshalText\":{\"In\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}],\"Out\":[{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"Value\":{\"In\":[],\"Out\":[{\"Name\":\"Value\",\"Ident\":\"driver.Value\",\"Kind\":20,\"PkgPath\":\"database/sql/driver\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]},\"Variant\":{\"In\":[],\"Out\":[{\"Name\":\"Variant\",\"Ident\":\"uuid.Variant\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]},\"Version\":{\"In\":[],\"Out\":[{\"Name\":\"Version\",\"Ident\":\"uuid.Version\",\"Kind\":8,\"PkgPath\":\"github.com/google/uuid\",\"Methods\":null}]}}}},\"default\":true,\"default_kind\":19,\"immutable\":true,\"position\":{\"Index\":0,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"isRemote\",\"type\":{\"Type\":1,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"position\":{\"Index\":1,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"uri\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":2,\"MixedIn\":true,\"MixinIndex\":0
}},{\"name\":\"extensions\",\"type\":{\"Type\":3,\"Ident\":\"lysand.Extensions\",\"PkgPath\":\"github.com/lysand-org/versia-go/pkg/lysand\",\"PkgName\":\"lysand\",\"Nillable\":true,\"RType\":{\"Name\":\"Extensions\",\"Ident\":\"lysand.Extensions\",\"Kind\":21,\"PkgPath\":\"github.com/lysand-org/versia-go/pkg/lysand\",\"Methods\":{}}},\"default\":true,\"default_value\":{},\"default_kind\":21,\"position\":{\"Index\":3,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"created_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"default\":true,\"default_kind\":19,\"immutable\":true,\"position\":{\"Index\":4,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"updated_at\",\"type\":{\"Type\":2,\"Ident\":\"\",\"PkgPath\":\"time\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"default\":true,\"default_kind\":19,\"update_default\":true,\"position\":{\"Index\":5,\"MixedIn\":true,\"MixinIndex\":0}},{\"name\":\"username\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"size\":32,\"unique\":true,\"validators\":2,\"position\":{\"Index\":0,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"passwordHash\",\"type\":{\"Type\":5,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":true,\"RType\":null},\"nillable\":true,\"optional\":true,\"position\":{\"Index\":1,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"displayName\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"size\":256,\"nillable\":true,\"optional\":true,\"validators\":1,\"position\":{\"Index\":2,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"biography\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"nillable\":true,\"optional\":true,\"position\":{\"Index\":3,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"publicKey\",\"type\":{\"Type\":5,\"Ident\":\"ed25519.PublicKey\",\"Pk
gPath\":\"crypto/ed25519\",\"PkgName\":\"ed25519\",\"Nillable\":true,\"RType\":{\"Name\":\"PublicKey\",\"Ident\":\"ed25519.PublicKey\",\"Kind\":23,\"PkgPath\":\"crypto/ed25519\",\"Methods\":{\"Equal\":{\"In\":[{\"Name\":\"PublicKey\",\"Ident\":\"crypto.PublicKey\",\"Kind\":20,\"PkgPath\":\"crypto\",\"Methods\":null}],\"Out\":[{\"Name\":\"bool\",\"Ident\":\"bool\",\"Kind\":1,\"PkgPath\":\"\",\"Methods\":null}]}}}},\"position\":{\"Index\":4,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"privateKey\",\"type\":{\"Type\":5,\"Ident\":\"ed25519.PrivateKey\",\"PkgPath\":\"crypto/ed25519\",\"PkgName\":\"ed25519\",\"Nillable\":true,\"RType\":{\"Name\":\"PrivateKey\",\"Ident\":\"ed25519.PrivateKey\",\"Kind\":23,\"PkgPath\":\"crypto/ed25519\",\"Methods\":{\"Equal\":{\"In\":[{\"Name\":\"PrivateKey\",\"Ident\":\"crypto.PrivateKey\",\"Kind\":20,\"PkgPath\":\"crypto\",\"Methods\":null}],\"Out\":[{\"Name\":\"bool\",\"Ident\":\"bool\",\"Kind\":1,\"PkgPath\":\"\",\"Methods\":null}]},\"Public\":{\"In\":[],\"Out\":[{\"Name\":\"PublicKey\",\"Ident\":\"crypto.PublicKey\",\"Kind\":20,\"PkgPath\":\"crypto\",\"Methods\":null}]},\"Seed\":{\"In\":[],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null}]},\"Sign\":{\"In\":[{\"Name\":\"Reader\",\"Ident\":\"io.Reader\",\"Kind\":20,\"PkgPath\":\"io\",\"Methods\":null},{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null},{\"Name\":\"SignerOpts\",\"Ident\":\"crypto.SignerOpts\",\"Kind\":20,\"PkgPath\":\"crypto\",\"Methods\":null}],\"Out\":[{\"Name\":\"\",\"Ident\":\"[]uint8\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":null},{\"Name\":\"error\",\"Ident\":\"error\",\"Kind\":20,\"PkgPath\":\"\",\"Methods\":null}]}}}},\"optional\":true,\"position\":{\"Index\":5,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"indexable\",\"type\":{\"Type\":1,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"default\":true,\"default_value\":true,\"default_kind\":1,\"
position\":{\"Index\":6,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"privacyLevel\",\"type\":{\"Type\":6,\"Ident\":\"user.PrivacyLevel\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"enums\":[{\"N\":\"public\",\"V\":\"public\"},{\"N\":\"restricted\",\"V\":\"restricted\"},{\"N\":\"private\",\"V\":\"private\"}],\"default\":true,\"default_value\":\"public\",\"default_kind\":24,\"position\":{\"Index\":7,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"fields\",\"type\":{\"Type\":3,\"Ident\":\"[]lysand.Field\",\"PkgPath\":\"github.com/lysand-org/versia-go/pkg/lysand\",\"PkgName\":\"lysand\",\"Nillable\":true,\"RType\":{\"Name\":\"\",\"Ident\":\"[]lysand.Field\",\"Kind\":23,\"PkgPath\":\"\",\"Methods\":{}}},\"default\":true,\"default_value\":[],\"default_kind\":23,\"position\":{\"Index\":8,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"inbox\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":9,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"featured\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":10,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"followers\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":11,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"following\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":12,\"MixedIn\":false,\"MixinIndex\":0}},{\"name\":\"outbox\",\"type\":{\"Type\":7,\"Ident\":\"\",\"PkgPath\":\"\",\"PkgName\":\"\",\"Nillable\":false,\"RType\":null},\"validators\":1,\"position\":{\"Index\":13,\"MixedIn\":false,\"MixinIndex\":0}}]}],\"Features\":[\"schema/snapshot\",\"sql/upsert\"]}" diff --git a/ent/migrate/migrate.go 
b/ent/migrate/migrate.go new file mode 100644 index 0000000..1956a6b --- /dev/null +++ b/ent/migrate/migrate.go @@ -0,0 +1,64 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "context" + "fmt" + "io" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql/schema" +) + +var ( + // WithGlobalUniqueID sets the universal ids options to the migration. + // If this option is enabled, ent migration will allocate a 1<<32 range + // for the ids of each entity (table). + // Note that this option cannot be applied on tables that already exist. + WithGlobalUniqueID = schema.WithGlobalUniqueID + // WithDropColumn sets the drop column option to the migration. + // If this option is enabled, ent migration will drop old columns + // that were used for both fields and edges. This defaults to false. + WithDropColumn = schema.WithDropColumn + // WithDropIndex sets the drop index option to the migration. + // If this option is enabled, ent migration will drop old indexes + // that were defined in the schema. This defaults to false. + // Note that unique constraints are defined using `UNIQUE INDEX`, + // and therefore, it's recommended to enable this option to get more + // flexibility in the schema changes. + WithDropIndex = schema.WithDropIndex + // WithForeignKeys enables creating foreign-key in schema DDL. This defaults to true. + WithForeignKeys = schema.WithForeignKeys +) + +// Schema is the API for creating, migrating and dropping a schema. +type Schema struct { + drv dialect.Driver +} + +// NewSchema creates a new schema client. +func NewSchema(drv dialect.Driver) *Schema { return &Schema{drv: drv} } + +// Create creates all schema resources. +func (s *Schema) Create(ctx context.Context, opts ...schema.MigrateOption) error { + return Create(ctx, s, Tables, opts...) +} + +// Create creates all table resources using the given schema driver. 
+func Create(ctx context.Context, s *Schema, tables []*schema.Table, opts ...schema.MigrateOption) error { + migrate, err := schema.NewMigrate(s.drv, opts...) + if err != nil { + return fmt.Errorf("ent/migrate: %w", err) + } + return migrate.Create(ctx, tables...) +} + +// WriteTo writes the schema changes to w instead of running them against the database. +// +// if err := client.Schema.WriteTo(context.Background(), os.Stdout); err != nil { +// log.Fatal(err) +// } +func (s *Schema) WriteTo(ctx context.Context, w io.Writer, opts ...schema.MigrateOption) error { + return Create(ctx, &Schema{drv: &schema.WriteDriver{Writer: w, Driver: s.drv}}, Tables, opts...) +} diff --git a/ent/migrate/schema.go b/ent/migrate/schema.go new file mode 100644 index 0000000..1d937be --- /dev/null +++ b/ent/migrate/schema.go @@ -0,0 +1,265 @@ +// Code generated by ent, DO NOT EDIT. + +package migrate + +import ( + "entgo.io/ent/dialect/sql/schema" + "entgo.io/ent/schema/field" +) + +var ( + // AttachmentsColumns holds the columns for the "attachments" table. + AttachmentsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID}, + {Name: "is_remote", Type: field.TypeBool}, + {Name: "uri", Type: field.TypeString}, + {Name: "extensions", Type: field.TypeJSON}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "description", Type: field.TypeString, Size: 384}, + {Name: "sha256", Type: field.TypeBytes}, + {Name: "size", Type: field.TypeInt}, + {Name: "blurhash", Type: field.TypeString, Nullable: true}, + {Name: "height", Type: field.TypeInt, Nullable: true}, + {Name: "width", Type: field.TypeInt, Nullable: true}, + {Name: "fps", Type: field.TypeInt, Nullable: true}, + {Name: "mime_type", Type: field.TypeString}, + {Name: "attachment_author", Type: field.TypeUUID}, + {Name: "note_attachments", Type: field.TypeUUID, Nullable: true}, + } + // AttachmentsTable holds the schema information for the "attachments" table. 
+ AttachmentsTable = &schema.Table{ + Name: "attachments", + Columns: AttachmentsColumns, + PrimaryKey: []*schema.Column{AttachmentsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "attachments_users_author", + Columns: []*schema.Column{AttachmentsColumns[14]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "attachments_notes_attachments", + Columns: []*schema.Column{AttachmentsColumns[15]}, + RefColumns: []*schema.Column{NotesColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // FollowsColumns holds the columns for the "follows" table. + FollowsColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID}, + {Name: "is_remote", Type: field.TypeBool}, + {Name: "uri", Type: field.TypeString}, + {Name: "extensions", Type: field.TypeJSON}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "status", Type: field.TypeEnum, Enums: []string{"pending", "accepted"}, Default: "pending"}, + {Name: "follow_follower", Type: field.TypeUUID}, + {Name: "follow_followee", Type: field.TypeUUID}, + } + // FollowsTable holds the schema information for the "follows" table. + FollowsTable = &schema.Table{ + Name: "follows", + Columns: FollowsColumns, + PrimaryKey: []*schema.Column{FollowsColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "follows_users_follower", + Columns: []*schema.Column{FollowsColumns[7]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "follows_users_followee", + Columns: []*schema.Column{FollowsColumns[8]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "follow_follow_follower_follow_followee", + Unique: true, + Columns: []*schema.Column{FollowsColumns[7], FollowsColumns[8]}, + }, + }, + } + // ImagesColumns holds the columns for the "images" table. 
+ ImagesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeInt, Increment: true}, + {Name: "url", Type: field.TypeString}, + {Name: "mime_type", Type: field.TypeString}, + } + // ImagesTable holds the schema information for the "images" table. + ImagesTable = &schema.Table{ + Name: "images", + Columns: ImagesColumns, + PrimaryKey: []*schema.Column{ImagesColumns[0]}, + } + // NotesColumns holds the columns for the "notes" table. + NotesColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID}, + {Name: "is_remote", Type: field.TypeBool}, + {Name: "uri", Type: field.TypeString}, + {Name: "extensions", Type: field.TypeJSON}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "subject", Type: field.TypeString, Nullable: true, Size: 384}, + {Name: "content", Type: field.TypeString}, + {Name: "is_sensitive", Type: field.TypeBool, Default: false}, + {Name: "visibility", Type: field.TypeEnum, Enums: []string{"public", "unlisted", "followers", "direct"}, Default: "public"}, + {Name: "note_author", Type: field.TypeUUID}, + } + // NotesTable holds the schema information for the "notes" table. + NotesTable = &schema.Table{ + Name: "notes", + Columns: NotesColumns, + PrimaryKey: []*schema.Column{NotesColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "notes_users_author", + Columns: []*schema.Column{NotesColumns[10]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + } + // ServerMetadataColumns holds the columns for the "server_metadata" table. 
+ ServerMetadataColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID}, + {Name: "is_remote", Type: field.TypeBool}, + {Name: "uri", Type: field.TypeString}, + {Name: "extensions", Type: field.TypeJSON}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "name", Type: field.TypeString}, + {Name: "description", Type: field.TypeString, Nullable: true}, + {Name: "version", Type: field.TypeString}, + {Name: "supported_extensions", Type: field.TypeJSON}, + {Name: "server_metadata_follower", Type: field.TypeUUID}, + {Name: "server_metadata_followee", Type: field.TypeUUID}, + } + // ServerMetadataTable holds the schema information for the "server_metadata" table. + ServerMetadataTable = &schema.Table{ + Name: "server_metadata", + Columns: ServerMetadataColumns, + PrimaryKey: []*schema.Column{ServerMetadataColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "server_metadata_users_follower", + Columns: []*schema.Column{ServerMetadataColumns[10]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + { + Symbol: "server_metadata_users_followee", + Columns: []*schema.Column{ServerMetadataColumns[11]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.NoAction, + }, + }, + Indexes: []*schema.Index{ + { + Name: "servermetadata_server_metadata_follower_server_metadata_followee", + Unique: true, + Columns: []*schema.Column{ServerMetadataColumns[10], ServerMetadataColumns[11]}, + }, + }, + } + // UsersColumns holds the columns for the "users" table. 
+ UsersColumns = []*schema.Column{ + {Name: "id", Type: field.TypeUUID}, + {Name: "is_remote", Type: field.TypeBool}, + {Name: "uri", Type: field.TypeString}, + {Name: "extensions", Type: field.TypeJSON}, + {Name: "created_at", Type: field.TypeTime}, + {Name: "updated_at", Type: field.TypeTime}, + {Name: "username", Type: field.TypeString, Unique: true, Size: 32}, + {Name: "password_hash", Type: field.TypeBytes, Nullable: true}, + {Name: "display_name", Type: field.TypeString, Nullable: true, Size: 256}, + {Name: "biography", Type: field.TypeString, Nullable: true}, + {Name: "public_key", Type: field.TypeBytes}, + {Name: "private_key", Type: field.TypeBytes, Nullable: true}, + {Name: "indexable", Type: field.TypeBool, Default: true}, + {Name: "privacy_level", Type: field.TypeEnum, Enums: []string{"public", "restricted", "private"}, Default: "public"}, + {Name: "fields", Type: field.TypeJSON}, + {Name: "inbox", Type: field.TypeString}, + {Name: "featured", Type: field.TypeString}, + {Name: "followers", Type: field.TypeString}, + {Name: "following", Type: field.TypeString}, + {Name: "outbox", Type: field.TypeString}, + {Name: "user_avatar_image", Type: field.TypeInt, Nullable: true}, + {Name: "user_header_image", Type: field.TypeInt, Nullable: true}, + } + // UsersTable holds the schema information for the "users" table. + UsersTable = &schema.Table{ + Name: "users", + Columns: UsersColumns, + PrimaryKey: []*schema.Column{UsersColumns[0]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "users_images_avatarImage", + Columns: []*schema.Column{UsersColumns[20]}, + RefColumns: []*schema.Column{ImagesColumns[0]}, + OnDelete: schema.SetNull, + }, + { + Symbol: "users_images_headerImage", + Columns: []*schema.Column{UsersColumns[21]}, + RefColumns: []*schema.Column{ImagesColumns[0]}, + OnDelete: schema.SetNull, + }, + }, + } + // NoteMentionsColumns holds the columns for the "note_mentions" table. 
+ NoteMentionsColumns = []*schema.Column{ + {Name: "note_id", Type: field.TypeUUID}, + {Name: "user_id", Type: field.TypeUUID}, + } + // NoteMentionsTable holds the schema information for the "note_mentions" table. + NoteMentionsTable = &schema.Table{ + Name: "note_mentions", + Columns: NoteMentionsColumns, + PrimaryKey: []*schema.Column{NoteMentionsColumns[0], NoteMentionsColumns[1]}, + ForeignKeys: []*schema.ForeignKey{ + { + Symbol: "note_mentions_note_id", + Columns: []*schema.Column{NoteMentionsColumns[0]}, + RefColumns: []*schema.Column{NotesColumns[0]}, + OnDelete: schema.Cascade, + }, + { + Symbol: "note_mentions_user_id", + Columns: []*schema.Column{NoteMentionsColumns[1]}, + RefColumns: []*schema.Column{UsersColumns[0]}, + OnDelete: schema.Cascade, + }, + }, + } + // Tables holds all the tables in the schema. + Tables = []*schema.Table{ + AttachmentsTable, + FollowsTable, + ImagesTable, + NotesTable, + ServerMetadataTable, + UsersTable, + NoteMentionsTable, + } +) + +func init() { + AttachmentsTable.ForeignKeys[0].RefTable = UsersTable + AttachmentsTable.ForeignKeys[1].RefTable = NotesTable + FollowsTable.ForeignKeys[0].RefTable = UsersTable + FollowsTable.ForeignKeys[1].RefTable = UsersTable + NotesTable.ForeignKeys[0].RefTable = UsersTable + ServerMetadataTable.ForeignKeys[0].RefTable = UsersTable + ServerMetadataTable.ForeignKeys[1].RefTable = UsersTable + UsersTable.ForeignKeys[0].RefTable = ImagesTable + UsersTable.ForeignKeys[1].RefTable = ImagesTable + NoteMentionsTable.ForeignKeys[0].RefTable = NotesTable + NoteMentionsTable.ForeignKeys[1].RefTable = UsersTable +} diff --git a/ent/mutation.go b/ent/mutation.go new file mode 100644 index 0000000..b93b028 --- /dev/null +++ b/ent/mutation.go @@ -0,0 +1,6055 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "crypto/ed25519" + "errors" + "fmt" + "sync" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/attachment" + "github.com/lysand-org/versia-go/ent/follow" + "github.com/lysand-org/versia-go/ent/image" + "github.com/lysand-org/versia-go/ent/note" + "github.com/lysand-org/versia-go/ent/predicate" + "github.com/lysand-org/versia-go/ent/servermetadata" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +const ( + // Operation types. + OpCreate = ent.OpCreate + OpDelete = ent.OpDelete + OpDeleteOne = ent.OpDeleteOne + OpUpdate = ent.OpUpdate + OpUpdateOne = ent.OpUpdateOne + + // Node types. + TypeAttachment = "Attachment" + TypeFollow = "Follow" + TypeImage = "Image" + TypeNote = "Note" + TypeServerMetadata = "ServerMetadata" + TypeUser = "User" +) + +// AttachmentMutation represents an operation that mutates the Attachment nodes in the graph. +type AttachmentMutation struct { + config + op Op + typ string + id *uuid.UUID + isRemote *bool + uri *string + extensions *lysand.Extensions + created_at *time.Time + updated_at *time.Time + description *string + sha256 *[]byte + size *int + addsize *int + blurhash *string + height *int + addheight *int + width *int + addwidth *int + fps *int + addfps *int + mimeType *string + clearedFields map[string]struct{} + author *uuid.UUID + clearedauthor bool + done bool + oldValue func(context.Context) (*Attachment, error) + predicates []predicate.Attachment +} + +var _ ent.Mutation = (*AttachmentMutation)(nil) + +// attachmentOption allows management of the mutation configuration using functional options. +type attachmentOption func(*AttachmentMutation) + +// newAttachmentMutation creates new mutation for the Attachment entity. 
+func newAttachmentMutation(c config, op Op, opts ...attachmentOption) *AttachmentMutation { + m := &AttachmentMutation{ + config: c, + op: op, + typ: TypeAttachment, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withAttachmentID sets the ID field of the mutation. +func withAttachmentID(id uuid.UUID) attachmentOption { + return func(m *AttachmentMutation) { + var ( + err error + once sync.Once + value *Attachment + ) + m.oldValue = func(ctx context.Context) (*Attachment, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().Attachment.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withAttachment sets the old Attachment of the mutation. +func withAttachment(node *Attachment) attachmentOption { + return func(m *AttachmentMutation) { + m.oldValue = func(context.Context) (*Attachment, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m AttachmentMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m AttachmentMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of Attachment entities. +func (m *AttachmentMutation) SetID(id uuid.UUID) { + m.id = &id +} + +// ID returns the ID value in the mutation. 
Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *AttachmentMutation) ID() (id uuid.UUID, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *AttachmentMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []uuid.UUID{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Attachment.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetIsRemote sets the "isRemote" field. +func (m *AttachmentMutation) SetIsRemote(b bool) { + m.isRemote = &b +} + +// IsRemote returns the value of the "isRemote" field in the mutation. +func (m *AttachmentMutation) IsRemote() (r bool, exists bool) { + v := m.isRemote + if v == nil { + return + } + return *v, true +} + +// OldIsRemote returns the old "isRemote" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AttachmentMutation) OldIsRemote(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIsRemote is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIsRemote requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIsRemote: %w", err) + } + return oldValue.IsRemote, nil +} + +// ResetIsRemote resets all changes to the "isRemote" field. +func (m *AttachmentMutation) ResetIsRemote() { + m.isRemote = nil +} + +// SetURI sets the "uri" field. +func (m *AttachmentMutation) SetURI(s string) { + m.uri = &s +} + +// URI returns the value of the "uri" field in the mutation. +func (m *AttachmentMutation) URI() (r string, exists bool) { + v := m.uri + if v == nil { + return + } + return *v, true +} + +// OldURI returns the old "uri" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AttachmentMutation) OldURI(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldURI is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldURI requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldURI: %w", err) + } + return oldValue.URI, nil +} + +// ResetURI resets all changes to the "uri" field. +func (m *AttachmentMutation) ResetURI() { + m.uri = nil +} + +// SetExtensions sets the "extensions" field. +func (m *AttachmentMutation) SetExtensions(l lysand.Extensions) { + m.extensions = &l +} + +// Extensions returns the value of the "extensions" field in the mutation. 
+func (m *AttachmentMutation) Extensions() (r lysand.Extensions, exists bool) { + v := m.extensions + if v == nil { + return + } + return *v, true +} + +// OldExtensions returns the old "extensions" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AttachmentMutation) OldExtensions(ctx context.Context) (v lysand.Extensions, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExtensions is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExtensions requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExtensions: %w", err) + } + return oldValue.Extensions, nil +} + +// ResetExtensions resets all changes to the "extensions" field. +func (m *AttachmentMutation) ResetExtensions() { + m.extensions = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *AttachmentMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *AttachmentMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AttachmentMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *AttachmentMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *AttachmentMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *AttachmentMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AttachmentMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *AttachmentMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetDescription sets the "description" field. 
+func (m *AttachmentMutation) SetDescription(s string) { + m.description = &s +} + +// Description returns the value of the "description" field in the mutation. +func (m *AttachmentMutation) Description() (r string, exists bool) { + v := m.description + if v == nil { + return + } + return *v, true +} + +// OldDescription returns the old "description" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AttachmentMutation) OldDescription(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDescription is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDescription requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDescription: %w", err) + } + return oldValue.Description, nil +} + +// ResetDescription resets all changes to the "description" field. +func (m *AttachmentMutation) ResetDescription() { + m.description = nil +} + +// SetSha256 sets the "sha256" field. +func (m *AttachmentMutation) SetSha256(b []byte) { + m.sha256 = &b +} + +// Sha256 returns the value of the "sha256" field in the mutation. +func (m *AttachmentMutation) Sha256() (r []byte, exists bool) { + v := m.sha256 + if v == nil { + return + } + return *v, true +} + +// OldSha256 returns the old "sha256" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *AttachmentMutation) OldSha256(ctx context.Context) (v []byte, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSha256 is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSha256 requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSha256: %w", err) + } + return oldValue.Sha256, nil +} + +// ResetSha256 resets all changes to the "sha256" field. +func (m *AttachmentMutation) ResetSha256() { + m.sha256 = nil +} + +// SetSize sets the "size" field. +func (m *AttachmentMutation) SetSize(i int) { + m.size = &i + m.addsize = nil +} + +// Size returns the value of the "size" field in the mutation. +func (m *AttachmentMutation) Size() (r int, exists bool) { + v := m.size + if v == nil { + return + } + return *v, true +} + +// OldSize returns the old "size" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AttachmentMutation) OldSize(ctx context.Context) (v int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldSize is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldSize requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldSize: %w", err) + } + return oldValue.Size, nil +} + +// AddSize adds i to the "size" field. +func (m *AttachmentMutation) AddSize(i int) { + if m.addsize != nil { + *m.addsize += i + } else { + m.addsize = &i + } +} + +// AddedSize returns the value that was added to the "size" field in this mutation. 
+func (m *AttachmentMutation) AddedSize() (r int, exists bool) { + v := m.addsize + if v == nil { + return + } + return *v, true +} + +// ResetSize resets all changes to the "size" field. +func (m *AttachmentMutation) ResetSize() { + m.size = nil + m.addsize = nil +} + +// SetBlurhash sets the "blurhash" field. +func (m *AttachmentMutation) SetBlurhash(s string) { + m.blurhash = &s +} + +// Blurhash returns the value of the "blurhash" field in the mutation. +func (m *AttachmentMutation) Blurhash() (r string, exists bool) { + v := m.blurhash + if v == nil { + return + } + return *v, true +} + +// OldBlurhash returns the old "blurhash" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AttachmentMutation) OldBlurhash(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldBlurhash is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldBlurhash requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldBlurhash: %w", err) + } + return oldValue.Blurhash, nil +} + +// ClearBlurhash clears the value of the "blurhash" field. +func (m *AttachmentMutation) ClearBlurhash() { + m.blurhash = nil + m.clearedFields[attachment.FieldBlurhash] = struct{}{} +} + +// BlurhashCleared returns if the "blurhash" field was cleared in this mutation. +func (m *AttachmentMutation) BlurhashCleared() bool { + _, ok := m.clearedFields[attachment.FieldBlurhash] + return ok +} + +// ResetBlurhash resets all changes to the "blurhash" field. +func (m *AttachmentMutation) ResetBlurhash() { + m.blurhash = nil + delete(m.clearedFields, attachment.FieldBlurhash) +} + +// SetHeight sets the "height" field. 
+func (m *AttachmentMutation) SetHeight(i int) { + m.height = &i + m.addheight = nil +} + +// Height returns the value of the "height" field in the mutation. +func (m *AttachmentMutation) Height() (r int, exists bool) { + v := m.height + if v == nil { + return + } + return *v, true +} + +// OldHeight returns the old "height" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AttachmentMutation) OldHeight(ctx context.Context) (v *int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldHeight is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldHeight requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldHeight: %w", err) + } + return oldValue.Height, nil +} + +// AddHeight adds i to the "height" field. +func (m *AttachmentMutation) AddHeight(i int) { + if m.addheight != nil { + *m.addheight += i + } else { + m.addheight = &i + } +} + +// AddedHeight returns the value that was added to the "height" field in this mutation. +func (m *AttachmentMutation) AddedHeight() (r int, exists bool) { + v := m.addheight + if v == nil { + return + } + return *v, true +} + +// ClearHeight clears the value of the "height" field. +func (m *AttachmentMutation) ClearHeight() { + m.height = nil + m.addheight = nil + m.clearedFields[attachment.FieldHeight] = struct{}{} +} + +// HeightCleared returns if the "height" field was cleared in this mutation. +func (m *AttachmentMutation) HeightCleared() bool { + _, ok := m.clearedFields[attachment.FieldHeight] + return ok +} + +// ResetHeight resets all changes to the "height" field. 
+func (m *AttachmentMutation) ResetHeight() { + m.height = nil + m.addheight = nil + delete(m.clearedFields, attachment.FieldHeight) +} + +// SetWidth sets the "width" field. +func (m *AttachmentMutation) SetWidth(i int) { + m.width = &i + m.addwidth = nil +} + +// Width returns the value of the "width" field in the mutation. +func (m *AttachmentMutation) Width() (r int, exists bool) { + v := m.width + if v == nil { + return + } + return *v, true +} + +// OldWidth returns the old "width" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AttachmentMutation) OldWidth(ctx context.Context) (v *int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldWidth is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldWidth requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldWidth: %w", err) + } + return oldValue.Width, nil +} + +// AddWidth adds i to the "width" field. +func (m *AttachmentMutation) AddWidth(i int) { + if m.addwidth != nil { + *m.addwidth += i + } else { + m.addwidth = &i + } +} + +// AddedWidth returns the value that was added to the "width" field in this mutation. +func (m *AttachmentMutation) AddedWidth() (r int, exists bool) { + v := m.addwidth + if v == nil { + return + } + return *v, true +} + +// ClearWidth clears the value of the "width" field. +func (m *AttachmentMutation) ClearWidth() { + m.width = nil + m.addwidth = nil + m.clearedFields[attachment.FieldWidth] = struct{}{} +} + +// WidthCleared returns if the "width" field was cleared in this mutation. 
+func (m *AttachmentMutation) WidthCleared() bool { + _, ok := m.clearedFields[attachment.FieldWidth] + return ok +} + +// ResetWidth resets all changes to the "width" field. +func (m *AttachmentMutation) ResetWidth() { + m.width = nil + m.addwidth = nil + delete(m.clearedFields, attachment.FieldWidth) +} + +// SetFps sets the "fps" field. +func (m *AttachmentMutation) SetFps(i int) { + m.fps = &i + m.addfps = nil +} + +// Fps returns the value of the "fps" field in the mutation. +func (m *AttachmentMutation) Fps() (r int, exists bool) { + v := m.fps + if v == nil { + return + } + return *v, true +} + +// OldFps returns the old "fps" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AttachmentMutation) OldFps(ctx context.Context) (v *int, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFps is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFps requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFps: %w", err) + } + return oldValue.Fps, nil +} + +// AddFps adds i to the "fps" field. +func (m *AttachmentMutation) AddFps(i int) { + if m.addfps != nil { + *m.addfps += i + } else { + m.addfps = &i + } +} + +// AddedFps returns the value that was added to the "fps" field in this mutation. +func (m *AttachmentMutation) AddedFps() (r int, exists bool) { + v := m.addfps + if v == nil { + return + } + return *v, true +} + +// ClearFps clears the value of the "fps" field. +func (m *AttachmentMutation) ClearFps() { + m.fps = nil + m.addfps = nil + m.clearedFields[attachment.FieldFps] = struct{}{} +} + +// FpsCleared returns if the "fps" field was cleared in this mutation. 
+func (m *AttachmentMutation) FpsCleared() bool { + _, ok := m.clearedFields[attachment.FieldFps] + return ok +} + +// ResetFps resets all changes to the "fps" field. +func (m *AttachmentMutation) ResetFps() { + m.fps = nil + m.addfps = nil + delete(m.clearedFields, attachment.FieldFps) +} + +// SetMimeType sets the "mimeType" field. +func (m *AttachmentMutation) SetMimeType(s string) { + m.mimeType = &s +} + +// MimeType returns the value of the "mimeType" field in the mutation. +func (m *AttachmentMutation) MimeType() (r string, exists bool) { + v := m.mimeType + if v == nil { + return + } + return *v, true +} + +// OldMimeType returns the old "mimeType" field's value of the Attachment entity. +// If the Attachment object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *AttachmentMutation) OldMimeType(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldMimeType is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldMimeType requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldMimeType: %w", err) + } + return oldValue.MimeType, nil +} + +// ResetMimeType resets all changes to the "mimeType" field. +func (m *AttachmentMutation) ResetMimeType() { + m.mimeType = nil +} + +// SetAuthorID sets the "author" edge to the User entity by id. +func (m *AttachmentMutation) SetAuthorID(id uuid.UUID) { + m.author = &id +} + +// ClearAuthor clears the "author" edge to the User entity. +func (m *AttachmentMutation) ClearAuthor() { + m.clearedauthor = true +} + +// AuthorCleared reports if the "author" edge to the User entity was cleared. 
+func (m *AttachmentMutation) AuthorCleared() bool { + return m.clearedauthor +} + +// AuthorID returns the "author" edge ID in the mutation. +func (m *AttachmentMutation) AuthorID() (id uuid.UUID, exists bool) { + if m.author != nil { + return *m.author, true + } + return +} + +// AuthorIDs returns the "author" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// AuthorID instead. It exists only for internal usage by the builders. +func (m *AttachmentMutation) AuthorIDs() (ids []uuid.UUID) { + if id := m.author; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetAuthor resets all changes to the "author" edge. +func (m *AttachmentMutation) ResetAuthor() { + m.author = nil + m.clearedauthor = false +} + +// Where appends a list predicates to the AttachmentMutation builder. +func (m *AttachmentMutation) Where(ps ...predicate.Attachment) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the AttachmentMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *AttachmentMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Attachment, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *AttachmentMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *AttachmentMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Attachment). +func (m *AttachmentMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *AttachmentMutation) Fields() []string { + fields := make([]string, 0, 13) + if m.isRemote != nil { + fields = append(fields, attachment.FieldIsRemote) + } + if m.uri != nil { + fields = append(fields, attachment.FieldURI) + } + if m.extensions != nil { + fields = append(fields, attachment.FieldExtensions) + } + if m.created_at != nil { + fields = append(fields, attachment.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, attachment.FieldUpdatedAt) + } + if m.description != nil { + fields = append(fields, attachment.FieldDescription) + } + if m.sha256 != nil { + fields = append(fields, attachment.FieldSha256) + } + if m.size != nil { + fields = append(fields, attachment.FieldSize) + } + if m.blurhash != nil { + fields = append(fields, attachment.FieldBlurhash) + } + if m.height != nil { + fields = append(fields, attachment.FieldHeight) + } + if m.width != nil { + fields = append(fields, attachment.FieldWidth) + } + if m.fps != nil { + fields = append(fields, attachment.FieldFps) + } + if m.mimeType != nil { + fields = append(fields, attachment.FieldMimeType) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. 
+func (m *AttachmentMutation) Field(name string) (ent.Value, bool) { + switch name { + case attachment.FieldIsRemote: + return m.IsRemote() + case attachment.FieldURI: + return m.URI() + case attachment.FieldExtensions: + return m.Extensions() + case attachment.FieldCreatedAt: + return m.CreatedAt() + case attachment.FieldUpdatedAt: + return m.UpdatedAt() + case attachment.FieldDescription: + return m.Description() + case attachment.FieldSha256: + return m.Sha256() + case attachment.FieldSize: + return m.Size() + case attachment.FieldBlurhash: + return m.Blurhash() + case attachment.FieldHeight: + return m.Height() + case attachment.FieldWidth: + return m.Width() + case attachment.FieldFps: + return m.Fps() + case attachment.FieldMimeType: + return m.MimeType() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. +func (m *AttachmentMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case attachment.FieldIsRemote: + return m.OldIsRemote(ctx) + case attachment.FieldURI: + return m.OldURI(ctx) + case attachment.FieldExtensions: + return m.OldExtensions(ctx) + case attachment.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case attachment.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case attachment.FieldDescription: + return m.OldDescription(ctx) + case attachment.FieldSha256: + return m.OldSha256(ctx) + case attachment.FieldSize: + return m.OldSize(ctx) + case attachment.FieldBlurhash: + return m.OldBlurhash(ctx) + case attachment.FieldHeight: + return m.OldHeight(ctx) + case attachment.FieldWidth: + return m.OldWidth(ctx) + case attachment.FieldFps: + return m.OldFps(ctx) + case attachment.FieldMimeType: + return m.OldMimeType(ctx) + } + return nil, fmt.Errorf("unknown Attachment field %s", name) +} + +// SetField sets the value of a field with the given name. 
It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *AttachmentMutation) SetField(name string, value ent.Value) error { + switch name { + case attachment.FieldIsRemote: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIsRemote(v) + return nil + case attachment.FieldURI: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetURI(v) + return nil + case attachment.FieldExtensions: + v, ok := value.(lysand.Extensions) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExtensions(v) + return nil + case attachment.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case attachment.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case attachment.FieldDescription: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDescription(v) + return nil + case attachment.FieldSha256: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSha256(v) + return nil + case attachment.FieldSize: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSize(v) + return nil + case attachment.FieldBlurhash: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBlurhash(v) + return nil + case attachment.FieldHeight: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetHeight(v) + return nil + case attachment.FieldWidth: + v, ok := value.(int) + 
if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetWidth(v) + return nil + case attachment.FieldFps: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFps(v) + return nil + case attachment.FieldMimeType: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetMimeType(v) + return nil + } + return fmt.Errorf("unknown Attachment field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *AttachmentMutation) AddedFields() []string { + var fields []string + if m.addsize != nil { + fields = append(fields, attachment.FieldSize) + } + if m.addheight != nil { + fields = append(fields, attachment.FieldHeight) + } + if m.addwidth != nil { + fields = append(fields, attachment.FieldWidth) + } + if m.addfps != nil { + fields = append(fields, attachment.FieldFps) + } + return fields +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *AttachmentMutation) AddedField(name string) (ent.Value, bool) { + switch name { + case attachment.FieldSize: + return m.AddedSize() + case attachment.FieldHeight: + return m.AddedHeight() + case attachment.FieldWidth: + return m.AddedWidth() + case attachment.FieldFps: + return m.AddedFps() + } + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *AttachmentMutation) AddField(name string, value ent.Value) error { + switch name { + case attachment.FieldSize: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddSize(v) + return nil + case attachment.FieldHeight: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddHeight(v) + return nil + case attachment.FieldWidth: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddWidth(v) + return nil + case attachment.FieldFps: + v, ok := value.(int) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.AddFps(v) + return nil + } + return fmt.Errorf("unknown Attachment numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *AttachmentMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(attachment.FieldBlurhash) { + fields = append(fields, attachment.FieldBlurhash) + } + if m.FieldCleared(attachment.FieldHeight) { + fields = append(fields, attachment.FieldHeight) + } + if m.FieldCleared(attachment.FieldWidth) { + fields = append(fields, attachment.FieldWidth) + } + if m.FieldCleared(attachment.FieldFps) { + fields = append(fields, attachment.FieldFps) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *AttachmentMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *AttachmentMutation) ClearField(name string) error { + switch name { + case attachment.FieldBlurhash: + m.ClearBlurhash() + return nil + case attachment.FieldHeight: + m.ClearHeight() + return nil + case attachment.FieldWidth: + m.ClearWidth() + return nil + case attachment.FieldFps: + m.ClearFps() + return nil + } + return fmt.Errorf("unknown Attachment nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *AttachmentMutation) ResetField(name string) error { + switch name { + case attachment.FieldIsRemote: + m.ResetIsRemote() + return nil + case attachment.FieldURI: + m.ResetURI() + return nil + case attachment.FieldExtensions: + m.ResetExtensions() + return nil + case attachment.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case attachment.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case attachment.FieldDescription: + m.ResetDescription() + return nil + case attachment.FieldSha256: + m.ResetSha256() + return nil + case attachment.FieldSize: + m.ResetSize() + return nil + case attachment.FieldBlurhash: + m.ResetBlurhash() + return nil + case attachment.FieldHeight: + m.ResetHeight() + return nil + case attachment.FieldWidth: + m.ResetWidth() + return nil + case attachment.FieldFps: + m.ResetFps() + return nil + case attachment.FieldMimeType: + m.ResetMimeType() + return nil + } + return fmt.Errorf("unknown Attachment field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *AttachmentMutation) AddedEdges() []string { + edges := make([]string, 0, 1) + if m.author != nil { + edges = append(edges, attachment.EdgeAuthor) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *AttachmentMutation) AddedIDs(name string) []ent.Value { + switch name { + case attachment.EdgeAuthor: + if id := m.author; id != nil { + return []ent.Value{*id} + } + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *AttachmentMutation) RemovedEdges() []string { + edges := make([]string, 0, 1) + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *AttachmentMutation) RemovedIDs(name string) []ent.Value { + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *AttachmentMutation) ClearedEdges() []string { + edges := make([]string, 0, 1) + if m.clearedauthor { + edges = append(edges, attachment.EdgeAuthor) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *AttachmentMutation) EdgeCleared(name string) bool { + switch name { + case attachment.EdgeAuthor: + return m.clearedauthor + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *AttachmentMutation) ClearEdge(name string) error { + switch name { + case attachment.EdgeAuthor: + m.ClearAuthor() + return nil + } + return fmt.Errorf("unknown Attachment unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *AttachmentMutation) ResetEdge(name string) error { + switch name { + case attachment.EdgeAuthor: + m.ResetAuthor() + return nil + } + return fmt.Errorf("unknown Attachment edge %s", name) +} + +// FollowMutation represents an operation that mutates the Follow nodes in the graph. 
type FollowMutation struct {
	config
	op            Op
	typ           string
	id            *uuid.UUID
	isRemote      *bool
	uri           *string
	extensions    *lysand.Extensions
	created_at    *time.Time
	updated_at    *time.Time
	status        *follow.Status
	clearedFields map[string]struct{}
	follower      *uuid.UUID
	clearedfollower bool
	followee      *uuid.UUID
	clearedfollowee bool
	// done marks the mutation as executed; querying old values afterwards is rejected.
	done     bool
	// oldValue lazily loads the pre-mutation Follow (see withFollowID/withFollow).
	oldValue func(context.Context) (*Follow, error)
	predicates []predicate.Follow
}

var _ ent.Mutation = (*FollowMutation)(nil)

// followOption allows management of the mutation configuration using functional options.
type followOption func(*FollowMutation)

// newFollowMutation creates new mutation for the Follow entity.
func newFollowMutation(c config, op Op, opts ...followOption) *FollowMutation {
	m := &FollowMutation{
		config:        c,
		op:            op,
		typ:           TypeFollow,
		clearedFields: make(map[string]struct{}),
	}
	for _, opt := range opts {
		opt(m)
	}
	return m
}

// withFollowID sets the ID field of the mutation.
func withFollowID(id uuid.UUID) followOption {
	return func(m *FollowMutation) {
		var (
			err   error
			once  sync.Once
			value *Follow
		)
		// The old value is fetched from the database at most once and cached
		// via sync.Once; later calls return the same (value, err) pair.
		m.oldValue = func(ctx context.Context) (*Follow, error) {
			once.Do(func() {
				if m.done {
					err = errors.New("querying old values post mutation is not allowed")
				} else {
					value, err = m.Client().Follow.Get(ctx, id)
				}
			})
			return value, err
		}
		m.id = &id
	}
}

// withFollow sets the old Follow of the mutation.
func withFollow(node *Follow) followOption {
	return func(m *FollowMutation) {
		// Entity already in hand: no database round-trip is needed for old values.
		m.oldValue = func(context.Context) (*Follow, error) {
			return node, nil
		}
		m.id = &node.ID
	}
}

// Client returns a new `ent.Client` from the mutation. If the mutation was
// executed in a transaction (ent.Tx), a transactional client is returned.
+func (m FollowMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m FollowMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of Follow entities. +func (m *FollowMutation) SetID(id uuid.UUID) { + m.id = &id +} + +// ID returns the ID value in the mutation. Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *FollowMutation) ID() (id uuid.UUID, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *FollowMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []uuid.UUID{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().Follow.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetIsRemote sets the "isRemote" field. +func (m *FollowMutation) SetIsRemote(b bool) { + m.isRemote = &b +} + +// IsRemote returns the value of the "isRemote" field in the mutation. 
+func (m *FollowMutation) IsRemote() (r bool, exists bool) { + v := m.isRemote + if v == nil { + return + } + return *v, true +} + +// OldIsRemote returns the old "isRemote" field's value of the Follow entity. +// If the Follow object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *FollowMutation) OldIsRemote(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIsRemote is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIsRemote requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIsRemote: %w", err) + } + return oldValue.IsRemote, nil +} + +// ResetIsRemote resets all changes to the "isRemote" field. +func (m *FollowMutation) ResetIsRemote() { + m.isRemote = nil +} + +// SetURI sets the "uri" field. +func (m *FollowMutation) SetURI(s string) { + m.uri = &s +} + +// URI returns the value of the "uri" field in the mutation. +func (m *FollowMutation) URI() (r string, exists bool) { + v := m.uri + if v == nil { + return + } + return *v, true +} + +// OldURI returns the old "uri" field's value of the Follow entity. +// If the Follow object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *FollowMutation) OldURI(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldURI is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldURI requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldURI: %w", err) + } + return oldValue.URI, nil +} + +// ResetURI resets all changes to the "uri" field. +func (m *FollowMutation) ResetURI() { + m.uri = nil +} + +// SetExtensions sets the "extensions" field. +func (m *FollowMutation) SetExtensions(l lysand.Extensions) { + m.extensions = &l +} + +// Extensions returns the value of the "extensions" field in the mutation. +func (m *FollowMutation) Extensions() (r lysand.Extensions, exists bool) { + v := m.extensions + if v == nil { + return + } + return *v, true +} + +// OldExtensions returns the old "extensions" field's value of the Follow entity. +// If the Follow object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *FollowMutation) OldExtensions(ctx context.Context) (v lysand.Extensions, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExtensions is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExtensions requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExtensions: %w", err) + } + return oldValue.Extensions, nil +} + +// ResetExtensions resets all changes to the "extensions" field. +func (m *FollowMutation) ResetExtensions() { + m.extensions = nil +} + +// SetCreatedAt sets the "created_at" field. 
+func (m *FollowMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *FollowMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the Follow entity. +// If the Follow object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *FollowMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *FollowMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *FollowMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *FollowMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the Follow entity. +// If the Follow object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *FollowMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *FollowMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetStatus sets the "status" field. +func (m *FollowMutation) SetStatus(f follow.Status) { + m.status = &f +} + +// Status returns the value of the "status" field in the mutation. +func (m *FollowMutation) Status() (r follow.Status, exists bool) { + v := m.status + if v == nil { + return + } + return *v, true +} + +// OldStatus returns the old "status" field's value of the Follow entity. +// If the Follow object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *FollowMutation) OldStatus(ctx context.Context) (v follow.Status, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldStatus is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldStatus requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldStatus: %w", err) + } + return oldValue.Status, nil +} + +// ResetStatus resets all changes to the "status" field. +func (m *FollowMutation) ResetStatus() { + m.status = nil +} + +// SetFollowerID sets the "follower" edge to the User entity by id. 
func (m *FollowMutation) SetFollowerID(id uuid.UUID) {
	m.follower = &id
}

// ClearFollower clears the "follower" edge to the User entity.
func (m *FollowMutation) ClearFollower() {
	m.clearedfollower = true
}

// FollowerCleared reports if the "follower" edge to the User entity was cleared.
func (m *FollowMutation) FollowerCleared() bool {
	return m.clearedfollower
}

// FollowerID returns the "follower" edge ID in the mutation.
func (m *FollowMutation) FollowerID() (id uuid.UUID, exists bool) {
	if m.follower != nil {
		return *m.follower, true
	}
	return
}

// FollowerIDs returns the "follower" edge IDs in the mutation.
// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
// FollowerID instead. It exists only for internal usage by the builders.
func (m *FollowMutation) FollowerIDs() (ids []uuid.UUID) {
	if id := m.follower; id != nil {
		ids = append(ids, *id)
	}
	return
}

// ResetFollower resets all changes to the "follower" edge.
func (m *FollowMutation) ResetFollower() {
	m.follower = nil
	m.clearedfollower = false
}

// SetFolloweeID sets the "followee" edge to the User entity by id.
func (m *FollowMutation) SetFolloweeID(id uuid.UUID) {
	m.followee = &id
}

// ClearFollowee clears the "followee" edge to the User entity.
func (m *FollowMutation) ClearFollowee() {
	m.clearedfollowee = true
}

// FolloweeCleared reports if the "followee" edge to the User entity was cleared.
func (m *FollowMutation) FolloweeCleared() bool {
	return m.clearedfollowee
}

// FolloweeID returns the "followee" edge ID in the mutation.
func (m *FollowMutation) FolloweeID() (id uuid.UUID, exists bool) {
	if m.followee != nil {
		return *m.followee, true
	}
	return
}

// FolloweeIDs returns the "followee" edge IDs in the mutation.
// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
// FolloweeID instead. It exists only for internal usage by the builders.
func (m *FollowMutation) FolloweeIDs() (ids []uuid.UUID) {
	if id := m.followee; id != nil {
		ids = append(ids, *id)
	}
	return
}

// ResetFollowee resets all changes to the "followee" edge.
func (m *FollowMutation) ResetFollowee() {
	m.followee = nil
	m.clearedfollowee = false
}

// Where appends a list predicates to the FollowMutation builder.
func (m *FollowMutation) Where(ps ...predicate.Follow) {
	m.predicates = append(m.predicates, ps...)
}

// WhereP appends storage-level predicates to the FollowMutation builder. Using this method,
// users can use type-assertion to append predicates that do not depend on any generated package.
func (m *FollowMutation) WhereP(ps ...func(*sql.Selector)) {
	p := make([]predicate.Follow, len(ps))
	for i := range ps {
		p[i] = ps[i]
	}
	m.Where(p...)
}

// Op returns the operation name.
func (m *FollowMutation) Op() Op {
	return m.op
}

// SetOp allows setting the mutation operation.
func (m *FollowMutation) SetOp(op Op) {
	m.op = op
}

// Type returns the node type of this mutation (Follow).
func (m *FollowMutation) Type() string {
	return m.typ
}

// Fields returns all fields that were changed during this mutation. Note that in
// order to get all numeric fields that were incremented/decremented, call
// AddedFields().
func (m *FollowMutation) Fields() []string {
	fields := make([]string, 0, 6)
	if m.isRemote != nil {
		fields = append(fields, follow.FieldIsRemote)
	}
	if m.uri != nil {
		fields = append(fields, follow.FieldURI)
	}
	if m.extensions != nil {
		fields = append(fields, follow.FieldExtensions)
	}
	if m.created_at != nil {
		fields = append(fields, follow.FieldCreatedAt)
	}
	if m.updated_at != nil {
		fields = append(fields, follow.FieldUpdatedAt)
	}
	if m.status != nil {
		fields = append(fields, follow.FieldStatus)
	}
	return fields
}

// Field returns the value of a field with the given name. The second boolean
// return value indicates that this field was not set, or was not defined in the
// schema.
func (m *FollowMutation) Field(name string) (ent.Value, bool) {
	switch name {
	case follow.FieldIsRemote:
		return m.IsRemote()
	case follow.FieldURI:
		return m.URI()
	case follow.FieldExtensions:
		return m.Extensions()
	case follow.FieldCreatedAt:
		return m.CreatedAt()
	case follow.FieldUpdatedAt:
		return m.UpdatedAt()
	case follow.FieldStatus:
		return m.Status()
	}
	return nil, false
}

// OldField returns the old value of the field from the database. An error is
// returned if the mutation operation is not UpdateOne, or the query to the
// database failed.
func (m *FollowMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
	switch name {
	case follow.FieldIsRemote:
		return m.OldIsRemote(ctx)
	case follow.FieldURI:
		return m.OldURI(ctx)
	case follow.FieldExtensions:
		return m.OldExtensions(ctx)
	case follow.FieldCreatedAt:
		return m.OldCreatedAt(ctx)
	case follow.FieldUpdatedAt:
		return m.OldUpdatedAt(ctx)
	case follow.FieldStatus:
		return m.OldStatus(ctx)
	}
	return nil, fmt.Errorf("unknown Follow field %s", name)
}

// SetField sets the value of a field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
func (m *FollowMutation) SetField(name string, value ent.Value) error {
	switch name {
	case follow.FieldIsRemote:
		v, ok := value.(bool)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetIsRemote(v)
		return nil
	case follow.FieldURI:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetURI(v)
		return nil
	case follow.FieldExtensions:
		v, ok := value.(lysand.Extensions)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetExtensions(v)
		return nil
	case follow.FieldCreatedAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetCreatedAt(v)
		return nil
	case follow.FieldUpdatedAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetUpdatedAt(v)
		return nil
	case follow.FieldStatus:
		v, ok := value.(follow.Status)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetStatus(v)
		return nil
	}
	return fmt.Errorf("unknown Follow field %s", name)
}

// AddedFields returns all numeric fields that were incremented/decremented during
// this mutation.
func (m *FollowMutation) AddedFields() []string {
	return nil
}

// AddedField returns the numeric value that was incremented/decremented on a field
// with the given name. The second boolean return value indicates that this field
// was not set, or was not defined in the schema.
func (m *FollowMutation) AddedField(name string) (ent.Value, bool) {
	return nil, false
}

// AddField adds the value to the field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
func (m *FollowMutation) AddField(name string, value ent.Value) error {
	// Follow declares no numeric fields, so the switch is intentionally empty.
	switch name {
	}
	return fmt.Errorf("unknown Follow numeric field %s", name)
}

// ClearedFields returns all nullable fields that were cleared during this
// mutation.
func (m *FollowMutation) ClearedFields() []string {
	return nil
}

// FieldCleared returns a boolean indicating if a field with the given name was
// cleared in this mutation.
func (m *FollowMutation) FieldCleared(name string) bool {
	_, ok := m.clearedFields[name]
	return ok
}

// ClearField clears the value of the field with the given name. It returns an
// error if the field is not defined in the schema.
func (m *FollowMutation) ClearField(name string) error {
	return fmt.Errorf("unknown Follow nullable field %s", name)
}

// ResetField resets all changes in the mutation for the field with the given name.
// It returns an error if the field is not defined in the schema.
func (m *FollowMutation) ResetField(name string) error {
	switch name {
	case follow.FieldIsRemote:
		m.ResetIsRemote()
		return nil
	case follow.FieldURI:
		m.ResetURI()
		return nil
	case follow.FieldExtensions:
		m.ResetExtensions()
		return nil
	case follow.FieldCreatedAt:
		m.ResetCreatedAt()
		return nil
	case follow.FieldUpdatedAt:
		m.ResetUpdatedAt()
		return nil
	case follow.FieldStatus:
		m.ResetStatus()
		return nil
	}
	return fmt.Errorf("unknown Follow field %s", name)
}

// AddedEdges returns all edge names that were set/added in this mutation.
func (m *FollowMutation) AddedEdges() []string {
	edges := make([]string, 0, 2)
	if m.follower != nil {
		edges = append(edges, follow.EdgeFollower)
	}
	if m.followee != nil {
		edges = append(edges, follow.EdgeFollowee)
	}
	return edges
}

// AddedIDs returns all IDs (to other nodes) that were added for the given edge
// name in this mutation.
func (m *FollowMutation) AddedIDs(name string) []ent.Value {
	switch name {
	case follow.EdgeFollower:
		if id := m.follower; id != nil {
			return []ent.Value{*id}
		}
	case follow.EdgeFollowee:
		if id := m.followee; id != nil {
			return []ent.Value{*id}
		}
	}
	return nil
}

// RemovedEdges returns all edge names that were removed in this mutation.
func (m *FollowMutation) RemovedEdges() []string {
	edges := make([]string, 0, 2)
	return edges
}

// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
// the given name in this mutation.
func (m *FollowMutation) RemovedIDs(name string) []ent.Value {
	return nil
}

// ClearedEdges returns all edge names that were cleared in this mutation.
func (m *FollowMutation) ClearedEdges() []string {
	edges := make([]string, 0, 2)
	if m.clearedfollower {
		edges = append(edges, follow.EdgeFollower)
	}
	if m.clearedfollowee {
		edges = append(edges, follow.EdgeFollowee)
	}
	return edges
}

// EdgeCleared returns a boolean which indicates if the edge with the given name
// was cleared in this mutation.
func (m *FollowMutation) EdgeCleared(name string) bool {
	switch name {
	case follow.EdgeFollower:
		return m.clearedfollower
	case follow.EdgeFollowee:
		return m.clearedfollowee
	}
	return false
}

// ClearEdge clears the value of the edge with the given name. It returns an error
// if that edge is not defined in the schema.
func (m *FollowMutation) ClearEdge(name string) error {
	switch name {
	case follow.EdgeFollower:
		m.ClearFollower()
		return nil
	case follow.EdgeFollowee:
		m.ClearFollowee()
		return nil
	}
	return fmt.Errorf("unknown Follow unique edge %s", name)
}

// ResetEdge resets all changes to the edge with the given name in this mutation.
// It returns an error if the edge is not defined in the schema.
func (m *FollowMutation) ResetEdge(name string) error {
	switch name {
	case follow.EdgeFollower:
		m.ResetFollower()
		return nil
	case follow.EdgeFollowee:
		m.ResetFollowee()
		return nil
	}
	return fmt.Errorf("unknown Follow edge %s", name)
}

// ImageMutation represents an operation that mutates the Image nodes in the graph.
type ImageMutation struct {
	config
	op            Op
	typ           string
	id            *int
	url           *string
	mimeType      *string
	clearedFields map[string]struct{}
	done          bool
	oldValue      func(context.Context) (*Image, error)
	predicates    []predicate.Image
}

var _ ent.Mutation = (*ImageMutation)(nil)

// imageOption allows management of the mutation configuration using functional options.
type imageOption func(*ImageMutation)

// newImageMutation creates new mutation for the Image entity.
func newImageMutation(c config, op Op, opts ...imageOption) *ImageMutation {
	m := &ImageMutation{
		config:        c,
		op:            op,
		typ:           TypeImage,
		clearedFields: make(map[string]struct{}),
	}
	for _, opt := range opts {
		opt(m)
	}
	return m
}

// withImageID sets the ID field of the mutation.
func withImageID(id int) imageOption {
	return func(m *ImageMutation) {
		var (
			err   error
			once  sync.Once
			value *Image
		)
		// Lazily (and at most once) load the pre-mutation row by ID.
		m.oldValue = func(ctx context.Context) (*Image, error) {
			once.Do(func() {
				if m.done {
					err = errors.New("querying old values post mutation is not allowed")
				} else {
					value, err = m.Client().Image.Get(ctx, id)
				}
			})
			return value, err
		}
		m.id = &id
	}
}

// withImage sets the old Image of the mutation.
func withImage(node *Image) imageOption {
	return func(m *ImageMutation) {
		m.oldValue = func(context.Context) (*Image, error) {
			return node, nil
		}
		m.id = &node.ID
	}
}

// Client returns a new `ent.Client` from the mutation. If the mutation was
// executed in a transaction (ent.Tx), a transactional client is returned.
func (m ImageMutation) Client() *Client {
	client := &Client{config: m.config}
	client.init()
	return client
}

// Tx returns an `ent.Tx` for mutations that were executed in transactions;
// it returns an error otherwise.
func (m ImageMutation) Tx() (*Tx, error) {
	if _, ok := m.driver.(*txDriver); !ok {
		return nil, errors.New("ent: mutation is not running in a transaction")
	}
	tx := &Tx{config: m.config}
	tx.init()
	return tx, nil
}

// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *ImageMutation) ID() (id int, exists bool) {
	if m.id == nil {
		return
	}
	return *m.id, true
}

// IDs queries the database and returns the entity ids that match the mutation's predicate.
// That means, if the mutation is applied within a transaction with an isolation level such
// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
// or updated by the mutation.
func (m *ImageMutation) IDs(ctx context.Context) ([]int, error) {
	switch {
	case m.op.Is(OpUpdateOne | OpDeleteOne):
		id, exists := m.ID()
		if exists {
			return []int{id}, nil
		}
		// No explicit ID on a *One op: fall through and resolve via predicates.
		fallthrough
	case m.op.Is(OpUpdate | OpDelete):
		return m.Client().Image.Query().Where(m.predicates...).IDs(ctx)
	default:
		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
	}
}

// SetURL sets the "url" field.
func (m *ImageMutation) SetURL(s string) {
	m.url = &s
}

// URL returns the value of the "url" field in the mutation.
func (m *ImageMutation) URL() (r string, exists bool) {
	v := m.url
	if v == nil {
		return
	}
	return *v, true
}

// OldURL returns the old "url" field's value of the Image entity.
// If the Image object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ImageMutation) OldURL(ctx context.Context) (v string, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldURL is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldURL requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldURL: %w", err)
	}
	return oldValue.URL, nil
}

// ResetURL resets all changes to the "url" field.
func (m *ImageMutation) ResetURL() {
	m.url = nil
}

// SetMimeType sets the "mimeType" field.
func (m *ImageMutation) SetMimeType(s string) {
	m.mimeType = &s
}

// MimeType returns the value of the "mimeType" field in the mutation.
func (m *ImageMutation) MimeType() (r string, exists bool) {
	v := m.mimeType
	if v == nil {
		return
	}
	return *v, true
}

// OldMimeType returns the old "mimeType" field's value of the Image entity.
// If the Image object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *ImageMutation) OldMimeType(ctx context.Context) (v string, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldMimeType is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldMimeType requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldMimeType: %w", err)
	}
	return oldValue.MimeType, nil
}

// ResetMimeType resets all changes to the "mimeType" field.
func (m *ImageMutation) ResetMimeType() {
	m.mimeType = nil
}

// Where appends a list predicates to the ImageMutation builder.
func (m *ImageMutation) Where(ps ...predicate.Image) {
	m.predicates = append(m.predicates, ps...)
}

// WhereP appends storage-level predicates to the ImageMutation builder. Using this method,
// users can use type-assertion to append predicates that do not depend on any generated package.
func (m *ImageMutation) WhereP(ps ...func(*sql.Selector)) {
	p := make([]predicate.Image, len(ps))
	for i := range ps {
		p[i] = ps[i]
	}
	m.Where(p...)
}

// Op returns the operation name.
func (m *ImageMutation) Op() Op {
	return m.op
}

// SetOp allows setting the mutation operation.
func (m *ImageMutation) SetOp(op Op) {
	m.op = op
}

// Type returns the node type of this mutation (Image).
func (m *ImageMutation) Type() string {
	return m.typ
}

// Fields returns all fields that were changed during this mutation. Note that in
// order to get all numeric fields that were incremented/decremented, call
// AddedFields().
func (m *ImageMutation) Fields() []string {
	fields := make([]string, 0, 2)
	if m.url != nil {
		fields = append(fields, image.FieldURL)
	}
	if m.mimeType != nil {
		fields = append(fields, image.FieldMimeType)
	}
	return fields
}

// Field returns the value of a field with the given name. The second boolean
// return value indicates that this field was not set, or was not defined in the
// schema.
func (m *ImageMutation) Field(name string) (ent.Value, bool) {
	switch name {
	case image.FieldURL:
		return m.URL()
	case image.FieldMimeType:
		return m.MimeType()
	}
	return nil, false
}

// OldField returns the old value of the field from the database. An error is
// returned if the mutation operation is not UpdateOne, or the query to the
// database failed.
func (m *ImageMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
	switch name {
	case image.FieldURL:
		return m.OldURL(ctx)
	case image.FieldMimeType:
		return m.OldMimeType(ctx)
	}
	return nil, fmt.Errorf("unknown Image field %s", name)
}

// SetField sets the value of a field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
func (m *ImageMutation) SetField(name string, value ent.Value) error {
	switch name {
	case image.FieldURL:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetURL(v)
		return nil
	case image.FieldMimeType:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetMimeType(v)
		return nil
	}
	return fmt.Errorf("unknown Image field %s", name)
}

// AddedFields returns all numeric fields that were incremented/decremented during
// this mutation.
func (m *ImageMutation) AddedFields() []string {
	return nil
}

// AddedField returns the numeric value that was incremented/decremented on a field
// with the given name. The second boolean return value indicates that this field
// was not set, or was not defined in the schema.
func (m *ImageMutation) AddedField(name string) (ent.Value, bool) {
	return nil, false
}

// AddField adds the value to the field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
func (m *ImageMutation) AddField(name string, value ent.Value) error {
	// Image declares no numeric fields, so the switch is intentionally empty.
	switch name {
	}
	return fmt.Errorf("unknown Image numeric field %s", name)
}

// ClearedFields returns all nullable fields that were cleared during this
// mutation.
func (m *ImageMutation) ClearedFields() []string {
	return nil
}

// FieldCleared returns a boolean indicating if a field with the given name was
// cleared in this mutation.
func (m *ImageMutation) FieldCleared(name string) bool {
	_, ok := m.clearedFields[name]
	return ok
}

// ClearField clears the value of the field with the given name. It returns an
// error if the field is not defined in the schema.
func (m *ImageMutation) ClearField(name string) error {
	return fmt.Errorf("unknown Image nullable field %s", name)
}

// ResetField resets all changes in the mutation for the field with the given name.
// It returns an error if the field is not defined in the schema.
func (m *ImageMutation) ResetField(name string) error {
	switch name {
	case image.FieldURL:
		m.ResetURL()
		return nil
	case image.FieldMimeType:
		m.ResetMimeType()
		return nil
	}
	return fmt.Errorf("unknown Image field %s", name)
}

// AddedEdges returns all edge names that were set/added in this mutation.
func (m *ImageMutation) AddedEdges() []string {
	// Image declares no edges, so all edge accessors below are no-ops.
	edges := make([]string, 0, 0)
	return edges
}

// AddedIDs returns all IDs (to other nodes) that were added for the given edge
// name in this mutation.
func (m *ImageMutation) AddedIDs(name string) []ent.Value {
	return nil
}

// RemovedEdges returns all edge names that were removed in this mutation.
func (m *ImageMutation) RemovedEdges() []string {
	edges := make([]string, 0, 0)
	return edges
}

// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
// the given name in this mutation.
func (m *ImageMutation) RemovedIDs(name string) []ent.Value {
	return nil
}

// ClearedEdges returns all edge names that were cleared in this mutation.
func (m *ImageMutation) ClearedEdges() []string {
	edges := make([]string, 0, 0)
	return edges
}

// EdgeCleared returns a boolean which indicates if the edge with the given name
// was cleared in this mutation.
func (m *ImageMutation) EdgeCleared(name string) bool {
	return false
}

// ClearEdge clears the value of the edge with the given name. It returns an error
// if that edge is not defined in the schema.
func (m *ImageMutation) ClearEdge(name string) error {
	return fmt.Errorf("unknown Image unique edge %s", name)
}

// ResetEdge resets all changes to the edge with the given name in this mutation.
// It returns an error if the edge is not defined in the schema.
func (m *ImageMutation) ResetEdge(name string) error {
	return fmt.Errorf("unknown Image edge %s", name)
}

// NoteMutation represents an operation that mutates the Note nodes in the graph.
type NoteMutation struct {
	config
	op                 Op
	typ                string
	id                 *uuid.UUID
	isRemote           *bool
	uri                *string
	extensions         *lysand.Extensions
	created_at         *time.Time
	updated_at         *time.Time
	subject            *string
	content            *string
	isSensitive        *bool
	visibility         *note.Visibility
	clearedFields      map[string]struct{}
	author             *uuid.UUID
	clearedauthor      bool
	mentions           map[uuid.UUID]struct{}
	removedmentions    map[uuid.UUID]struct{}
	clearedmentions    bool
	attachments        map[uuid.UUID]struct{}
	removedattachments map[uuid.UUID]struct{}
	clearedattachments bool
	done               bool
	oldValue           func(context.Context) (*Note, error)
	predicates         []predicate.Note
}

var _ ent.Mutation = (*NoteMutation)(nil)

// noteOption allows management of the mutation configuration using functional options.
type noteOption func(*NoteMutation)

// newNoteMutation creates new mutation for the Note entity.
func newNoteMutation(c config, op Op, opts ...noteOption) *NoteMutation {
	m := &NoteMutation{
		config:        c,
		op:            op,
		typ:           TypeNote,
		clearedFields: make(map[string]struct{}),
	}
	for _, opt := range opts {
		opt(m)
	}
	return m
}

// withNoteID sets the ID field of the mutation.
func withNoteID(id uuid.UUID) noteOption {
	return func(m *NoteMutation) {
		var (
			err   error
			once  sync.Once
			value *Note
		)
		// Lazily (and at most once) load the pre-mutation row by ID.
		m.oldValue = func(ctx context.Context) (*Note, error) {
			once.Do(func() {
				if m.done {
					err = errors.New("querying old values post mutation is not allowed")
				} else {
					value, err = m.Client().Note.Get(ctx, id)
				}
			})
			return value, err
		}
		m.id = &id
	}
}

// withNote sets the old Note of the mutation.
func withNote(node *Note) noteOption {
	return func(m *NoteMutation) {
		m.oldValue = func(context.Context) (*Note, error) {
			return node, nil
		}
		m.id = &node.ID
	}
}

// Client returns a new `ent.Client` from the mutation. If the mutation was
// executed in a transaction (ent.Tx), a transactional client is returned.
func (m NoteMutation) Client() *Client {
	client := &Client{config: m.config}
	client.init()
	return client
}

// Tx returns an `ent.Tx` for mutations that were executed in transactions;
// it returns an error otherwise.
func (m NoteMutation) Tx() (*Tx, error) {
	if _, ok := m.driver.(*txDriver); !ok {
		return nil, errors.New("ent: mutation is not running in a transaction")
	}
	tx := &Tx{config: m.config}
	tx.init()
	return tx, nil
}

// SetID sets the value of the id field. Note that this
// operation is only accepted on creation of Note entities.
func (m *NoteMutation) SetID(id uuid.UUID) {
	m.id = &id
}

// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *NoteMutation) ID() (id uuid.UUID, exists bool) {
	if m.id == nil {
		return
	}
	return *m.id, true
}

// IDs queries the database and returns the entity ids that match the mutation's predicate.
// That means, if the mutation is applied within a transaction with an isolation level such
// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
// or updated by the mutation.
func (m *NoteMutation) IDs(ctx context.Context) ([]uuid.UUID, error) {
	switch {
	case m.op.Is(OpUpdateOne | OpDeleteOne):
		id, exists := m.ID()
		if exists {
			return []uuid.UUID{id}, nil
		}
		// No explicit ID on a *One op: fall through and resolve via predicates.
		fallthrough
	case m.op.Is(OpUpdate | OpDelete):
		return m.Client().Note.Query().Where(m.predicates...).IDs(ctx)
	default:
		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
	}
}

// SetIsRemote sets the "isRemote" field.
func (m *NoteMutation) SetIsRemote(b bool) {
	m.isRemote = &b
}

// IsRemote returns the value of the "isRemote" field in the mutation.
func (m *NoteMutation) IsRemote() (r bool, exists bool) {
	v := m.isRemote
	if v == nil {
		return
	}
	return *v, true
}

// OldIsRemote returns the old "isRemote" field's value of the Note entity.
// If the Note object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *NoteMutation) OldIsRemote(ctx context.Context) (v bool, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldIsRemote is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldIsRemote requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldIsRemote: %w", err)
	}
	return oldValue.IsRemote, nil
}

// ResetIsRemote resets all changes to the "isRemote" field.
func (m *NoteMutation) ResetIsRemote() {
	m.isRemote = nil
}

// SetURI sets the "uri" field.
func (m *NoteMutation) SetURI(s string) {
	m.uri = &s
}

// URI returns the value of the "uri" field in the mutation.
func (m *NoteMutation) URI() (r string, exists bool) {
	v := m.uri
	if v == nil {
		return
	}
	return *v, true
}

// OldURI returns the old "uri" field's value of the Note entity.
// If the Note object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *NoteMutation) OldURI(ctx context.Context) (v string, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldURI is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldURI requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldURI: %w", err)
	}
	return oldValue.URI, nil
}

// ResetURI resets all changes to the "uri" field.
func (m *NoteMutation) ResetURI() {
	m.uri = nil
}

// SetExtensions sets the "extensions" field.
func (m *NoteMutation) SetExtensions(l lysand.Extensions) {
	m.extensions = &l
}

// Extensions returns the value of the "extensions" field in the mutation.
func (m *NoteMutation) Extensions() (r lysand.Extensions, exists bool) {
	v := m.extensions
	if v == nil {
		return
	}
	return *v, true
}

// OldExtensions returns the old "extensions" field's value of the Note entity.
// If the Note object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *NoteMutation) OldExtensions(ctx context.Context) (v lysand.Extensions, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldExtensions is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldExtensions requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldExtensions: %w", err)
	}
	return oldValue.Extensions, nil
}

// ResetExtensions resets all changes to the "extensions" field.
func (m *NoteMutation) ResetExtensions() {
	m.extensions = nil
}

// SetCreatedAt sets the "created_at" field.
func (m *NoteMutation) SetCreatedAt(t time.Time) {
	m.created_at = &t
}

// CreatedAt returns the value of the "created_at" field in the mutation.
func (m *NoteMutation) CreatedAt() (r time.Time, exists bool) {
	v := m.created_at
	if v == nil {
		return
	}
	return *v, true
}

// OldCreatedAt returns the old "created_at" field's value of the Note entity.
// If the Note object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *NoteMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
	}
	return oldValue.CreatedAt, nil
}

// ResetCreatedAt resets all changes to the "created_at" field.
func (m *NoteMutation) ResetCreatedAt() {
	m.created_at = nil
}

// SetUpdatedAt sets the "updated_at" field.
func (m *NoteMutation) SetUpdatedAt(t time.Time) {
	m.updated_at = &t
}

// UpdatedAt returns the value of the "updated_at" field in the mutation.
func (m *NoteMutation) UpdatedAt() (r time.Time, exists bool) {
	v := m.updated_at
	if v == nil {
		return
	}
	return *v, true
}

// OldUpdatedAt returns the old "updated_at" field's value of the Note entity.
// If the Note object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *NoteMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
	}
	return oldValue.UpdatedAt, nil
}

// ResetUpdatedAt resets all changes to the "updated_at" field.
func (m *NoteMutation) ResetUpdatedAt() {
	m.updated_at = nil
}

// SetSubject sets the "subject" field.
func (m *NoteMutation) SetSubject(s string) {
	m.subject = &s
}

// Subject returns the value of the "subject" field in the mutation.
func (m *NoteMutation) Subject() (r string, exists bool) {
	v := m.subject
	if v == nil {
		return
	}
	return *v, true
}

// OldSubject returns the old "subject" field's value of the Note entity.
// If the Note object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
// Returns *string (not string): "subject" is a clearable/nullable field, so the
// old value may be absent.
func (m *NoteMutation) OldSubject(ctx context.Context) (v *string, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldSubject is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldSubject requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldSubject: %w", err)
	}
	return oldValue.Subject, nil
}

// ClearSubject clears the value of the "subject" field.
func (m *NoteMutation) ClearSubject() {
	m.subject = nil
	m.clearedFields[note.FieldSubject] = struct{}{}
}

// SubjectCleared returns if the "subject" field was cleared in this mutation.
func (m *NoteMutation) SubjectCleared() bool {
	_, ok := m.clearedFields[note.FieldSubject]
	return ok
}

// ResetSubject resets all changes to the "subject" field.
func (m *NoteMutation) ResetSubject() {
	m.subject = nil
	delete(m.clearedFields, note.FieldSubject)
}

// SetContent sets the "content" field.
func (m *NoteMutation) SetContent(s string) {
	m.content = &s
}

// Content returns the value of the "content" field in the mutation.
func (m *NoteMutation) Content() (r string, exists bool) {
	v := m.content
	if v == nil {
		return
	}
	return *v, true
}

// OldContent returns the old "content" field's value of the Note entity.
// If the Note object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *NoteMutation) OldContent(ctx context.Context) (v string, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldContent is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldContent requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldContent: %w", err)
	}
	return oldValue.Content, nil
}

// ResetContent resets all changes to the "content" field.
func (m *NoteMutation) ResetContent() {
	m.content = nil
}

// SetIsSensitive sets the "isSensitive" field.
func (m *NoteMutation) SetIsSensitive(b bool) {
	m.isSensitive = &b
}

// IsSensitive returns the value of the "isSensitive" field in the mutation.
func (m *NoteMutation) IsSensitive() (r bool, exists bool) {
	v := m.isSensitive
	if v == nil {
		return
	}
	return *v, true
}

// OldIsSensitive returns the old "isSensitive" field's value of the Note entity.
// If the Note object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *NoteMutation) OldIsSensitive(ctx context.Context) (v bool, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldIsSensitive is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldIsSensitive requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldIsSensitive: %w", err)
	}
	return oldValue.IsSensitive, nil
}

// ResetIsSensitive resets all changes to the "isSensitive" field.
func (m *NoteMutation) ResetIsSensitive() {
	m.isSensitive = nil
}

// SetVisibility sets the "visibility" field.
func (m *NoteMutation) SetVisibility(n note.Visibility) {
	m.visibility = &n
}

// Visibility returns the value of the "visibility" field in the mutation.
func (m *NoteMutation) Visibility() (r note.Visibility, exists bool) {
	v := m.visibility
	if v == nil {
		return
	}
	return *v, true
}

// OldVisibility returns the old "visibility" field's value of the Note entity.
// If the Note object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *NoteMutation) OldVisibility(ctx context.Context) (v note.Visibility, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldVisibility is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldVisibility requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldVisibility: %w", err)
	}
	return oldValue.Visibility, nil
}

// ResetVisibility resets all changes to the "visibility" field.
+func (m *NoteMutation) ResetVisibility() { + m.visibility = nil +} + +// SetAuthorID sets the "author" edge to the User entity by id. +func (m *NoteMutation) SetAuthorID(id uuid.UUID) { + m.author = &id +} + +// ClearAuthor clears the "author" edge to the User entity. +func (m *NoteMutation) ClearAuthor() { + m.clearedauthor = true +} + +// AuthorCleared reports if the "author" edge to the User entity was cleared. +func (m *NoteMutation) AuthorCleared() bool { + return m.clearedauthor +} + +// AuthorID returns the "author" edge ID in the mutation. +func (m *NoteMutation) AuthorID() (id uuid.UUID, exists bool) { + if m.author != nil { + return *m.author, true + } + return +} + +// AuthorIDs returns the "author" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// AuthorID instead. It exists only for internal usage by the builders. +func (m *NoteMutation) AuthorIDs() (ids []uuid.UUID) { + if id := m.author; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetAuthor resets all changes to the "author" edge. +func (m *NoteMutation) ResetAuthor() { + m.author = nil + m.clearedauthor = false +} + +// AddMentionIDs adds the "mentions" edge to the User entity by ids. +func (m *NoteMutation) AddMentionIDs(ids ...uuid.UUID) { + if m.mentions == nil { + m.mentions = make(map[uuid.UUID]struct{}) + } + for i := range ids { + m.mentions[ids[i]] = struct{}{} + } +} + +// ClearMentions clears the "mentions" edge to the User entity. +func (m *NoteMutation) ClearMentions() { + m.clearedmentions = true +} + +// MentionsCleared reports if the "mentions" edge to the User entity was cleared. +func (m *NoteMutation) MentionsCleared() bool { + return m.clearedmentions +} + +// RemoveMentionIDs removes the "mentions" edge to the User entity by IDs. 
+func (m *NoteMutation) RemoveMentionIDs(ids ...uuid.UUID) { + if m.removedmentions == nil { + m.removedmentions = make(map[uuid.UUID]struct{}) + } + for i := range ids { + delete(m.mentions, ids[i]) + m.removedmentions[ids[i]] = struct{}{} + } +} + +// RemovedMentions returns the removed IDs of the "mentions" edge to the User entity. +func (m *NoteMutation) RemovedMentionsIDs() (ids []uuid.UUID) { + for id := range m.removedmentions { + ids = append(ids, id) + } + return +} + +// MentionsIDs returns the "mentions" edge IDs in the mutation. +func (m *NoteMutation) MentionsIDs() (ids []uuid.UUID) { + for id := range m.mentions { + ids = append(ids, id) + } + return +} + +// ResetMentions resets all changes to the "mentions" edge. +func (m *NoteMutation) ResetMentions() { + m.mentions = nil + m.clearedmentions = false + m.removedmentions = nil +} + +// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by ids. +func (m *NoteMutation) AddAttachmentIDs(ids ...uuid.UUID) { + if m.attachments == nil { + m.attachments = make(map[uuid.UUID]struct{}) + } + for i := range ids { + m.attachments[ids[i]] = struct{}{} + } +} + +// ClearAttachments clears the "attachments" edge to the Attachment entity. +func (m *NoteMutation) ClearAttachments() { + m.clearedattachments = true +} + +// AttachmentsCleared reports if the "attachments" edge to the Attachment entity was cleared. +func (m *NoteMutation) AttachmentsCleared() bool { + return m.clearedattachments +} + +// RemoveAttachmentIDs removes the "attachments" edge to the Attachment entity by IDs. +func (m *NoteMutation) RemoveAttachmentIDs(ids ...uuid.UUID) { + if m.removedattachments == nil { + m.removedattachments = make(map[uuid.UUID]struct{}) + } + for i := range ids { + delete(m.attachments, ids[i]) + m.removedattachments[ids[i]] = struct{}{} + } +} + +// RemovedAttachments returns the removed IDs of the "attachments" edge to the Attachment entity. 
+func (m *NoteMutation) RemovedAttachmentsIDs() (ids []uuid.UUID) { + for id := range m.removedattachments { + ids = append(ids, id) + } + return +} + +// AttachmentsIDs returns the "attachments" edge IDs in the mutation. +func (m *NoteMutation) AttachmentsIDs() (ids []uuid.UUID) { + for id := range m.attachments { + ids = append(ids, id) + } + return +} + +// ResetAttachments resets all changes to the "attachments" edge. +func (m *NoteMutation) ResetAttachments() { + m.attachments = nil + m.clearedattachments = false + m.removedattachments = nil +} + +// Where appends a list predicates to the NoteMutation builder. +func (m *NoteMutation) Where(ps ...predicate.Note) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the NoteMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *NoteMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.Note, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *NoteMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *NoteMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (Note). +func (m *NoteMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). 
+func (m *NoteMutation) Fields() []string { + fields := make([]string, 0, 9) + if m.isRemote != nil { + fields = append(fields, note.FieldIsRemote) + } + if m.uri != nil { + fields = append(fields, note.FieldURI) + } + if m.extensions != nil { + fields = append(fields, note.FieldExtensions) + } + if m.created_at != nil { + fields = append(fields, note.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, note.FieldUpdatedAt) + } + if m.subject != nil { + fields = append(fields, note.FieldSubject) + } + if m.content != nil { + fields = append(fields, note.FieldContent) + } + if m.isSensitive != nil { + fields = append(fields, note.FieldIsSensitive) + } + if m.visibility != nil { + fields = append(fields, note.FieldVisibility) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *NoteMutation) Field(name string) (ent.Value, bool) { + switch name { + case note.FieldIsRemote: + return m.IsRemote() + case note.FieldURI: + return m.URI() + case note.FieldExtensions: + return m.Extensions() + case note.FieldCreatedAt: + return m.CreatedAt() + case note.FieldUpdatedAt: + return m.UpdatedAt() + case note.FieldSubject: + return m.Subject() + case note.FieldContent: + return m.Content() + case note.FieldIsSensitive: + return m.IsSensitive() + case note.FieldVisibility: + return m.Visibility() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *NoteMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case note.FieldIsRemote: + return m.OldIsRemote(ctx) + case note.FieldURI: + return m.OldURI(ctx) + case note.FieldExtensions: + return m.OldExtensions(ctx) + case note.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case note.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case note.FieldSubject: + return m.OldSubject(ctx) + case note.FieldContent: + return m.OldContent(ctx) + case note.FieldIsSensitive: + return m.OldIsSensitive(ctx) + case note.FieldVisibility: + return m.OldVisibility(ctx) + } + return nil, fmt.Errorf("unknown Note field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *NoteMutation) SetField(name string, value ent.Value) error { + switch name { + case note.FieldIsRemote: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIsRemote(v) + return nil + case note.FieldURI: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetURI(v) + return nil + case note.FieldExtensions: + v, ok := value.(lysand.Extensions) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExtensions(v) + return nil + case note.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case note.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case note.FieldSubject: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetSubject(v) + return nil + case note.FieldContent: + v, ok := value.(string) 
+ if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetContent(v) + return nil + case note.FieldIsSensitive: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIsSensitive(v) + return nil + case note.FieldVisibility: + v, ok := value.(note.Visibility) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetVisibility(v) + return nil + } + return fmt.Errorf("unknown Note field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *NoteMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *NoteMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *NoteMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown Note numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *NoteMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(note.FieldSubject) { + fields = append(fields, note.FieldSubject) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *NoteMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. 
+func (m *NoteMutation) ClearField(name string) error { + switch name { + case note.FieldSubject: + m.ClearSubject() + return nil + } + return fmt.Errorf("unknown Note nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. +func (m *NoteMutation) ResetField(name string) error { + switch name { + case note.FieldIsRemote: + m.ResetIsRemote() + return nil + case note.FieldURI: + m.ResetURI() + return nil + case note.FieldExtensions: + m.ResetExtensions() + return nil + case note.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case note.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case note.FieldSubject: + m.ResetSubject() + return nil + case note.FieldContent: + m.ResetContent() + return nil + case note.FieldIsSensitive: + m.ResetIsSensitive() + return nil + case note.FieldVisibility: + m.ResetVisibility() + return nil + } + return fmt.Errorf("unknown Note field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *NoteMutation) AddedEdges() []string { + edges := make([]string, 0, 3) + if m.author != nil { + edges = append(edges, note.EdgeAuthor) + } + if m.mentions != nil { + edges = append(edges, note.EdgeMentions) + } + if m.attachments != nil { + edges = append(edges, note.EdgeAttachments) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *NoteMutation) AddedIDs(name string) []ent.Value { + switch name { + case note.EdgeAuthor: + if id := m.author; id != nil { + return []ent.Value{*id} + } + case note.EdgeMentions: + ids := make([]ent.Value, 0, len(m.mentions)) + for id := range m.mentions { + ids = append(ids, id) + } + return ids + case note.EdgeAttachments: + ids := make([]ent.Value, 0, len(m.attachments)) + for id := range m.attachments { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *NoteMutation) RemovedEdges() []string { + edges := make([]string, 0, 3) + if m.removedmentions != nil { + edges = append(edges, note.EdgeMentions) + } + if m.removedattachments != nil { + edges = append(edges, note.EdgeAttachments) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *NoteMutation) RemovedIDs(name string) []ent.Value { + switch name { + case note.EdgeMentions: + ids := make([]ent.Value, 0, len(m.removedmentions)) + for id := range m.removedmentions { + ids = append(ids, id) + } + return ids + case note.EdgeAttachments: + ids := make([]ent.Value, 0, len(m.removedattachments)) + for id := range m.removedattachments { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. +func (m *NoteMutation) ClearedEdges() []string { + edges := make([]string, 0, 3) + if m.clearedauthor { + edges = append(edges, note.EdgeAuthor) + } + if m.clearedmentions { + edges = append(edges, note.EdgeMentions) + } + if m.clearedattachments { + edges = append(edges, note.EdgeAttachments) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. 
+func (m *NoteMutation) EdgeCleared(name string) bool { + switch name { + case note.EdgeAuthor: + return m.clearedauthor + case note.EdgeMentions: + return m.clearedmentions + case note.EdgeAttachments: + return m.clearedattachments + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *NoteMutation) ClearEdge(name string) error { + switch name { + case note.EdgeAuthor: + m.ClearAuthor() + return nil + } + return fmt.Errorf("unknown Note unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *NoteMutation) ResetEdge(name string) error { + switch name { + case note.EdgeAuthor: + m.ResetAuthor() + return nil + case note.EdgeMentions: + m.ResetMentions() + return nil + case note.EdgeAttachments: + m.ResetAttachments() + return nil + } + return fmt.Errorf("unknown Note edge %s", name) +} + +// ServerMetadataMutation represents an operation that mutates the ServerMetadata nodes in the graph. +type ServerMetadataMutation struct { + config + op Op + typ string + id *uuid.UUID + isRemote *bool + uri *string + extensions *lysand.Extensions + created_at *time.Time + updated_at *time.Time + name *string + description *string + version *string + supportedExtensions *[]string + appendsupportedExtensions []string + clearedFields map[string]struct{} + follower *uuid.UUID + clearedfollower bool + followee *uuid.UUID + clearedfollowee bool + done bool + oldValue func(context.Context) (*ServerMetadata, error) + predicates []predicate.ServerMetadata +} + +var _ ent.Mutation = (*ServerMetadataMutation)(nil) + +// servermetadataOption allows management of the mutation configuration using functional options. +type servermetadataOption func(*ServerMetadataMutation) + +// newServerMetadataMutation creates new mutation for the ServerMetadata entity. 
+func newServerMetadataMutation(c config, op Op, opts ...servermetadataOption) *ServerMetadataMutation { + m := &ServerMetadataMutation{ + config: c, + op: op, + typ: TypeServerMetadata, + clearedFields: make(map[string]struct{}), + } + for _, opt := range opts { + opt(m) + } + return m +} + +// withServerMetadataID sets the ID field of the mutation. +func withServerMetadataID(id uuid.UUID) servermetadataOption { + return func(m *ServerMetadataMutation) { + var ( + err error + once sync.Once + value *ServerMetadata + ) + m.oldValue = func(ctx context.Context) (*ServerMetadata, error) { + once.Do(func() { + if m.done { + err = errors.New("querying old values post mutation is not allowed") + } else { + value, err = m.Client().ServerMetadata.Get(ctx, id) + } + }) + return value, err + } + m.id = &id + } +} + +// withServerMetadata sets the old ServerMetadata of the mutation. +func withServerMetadata(node *ServerMetadata) servermetadataOption { + return func(m *ServerMetadataMutation) { + m.oldValue = func(context.Context) (*ServerMetadata, error) { + return node, nil + } + m.id = &node.ID + } +} + +// Client returns a new `ent.Client` from the mutation. If the mutation was +// executed in a transaction (ent.Tx), a transactional client is returned. +func (m ServerMetadataMutation) Client() *Client { + client := &Client{config: m.config} + client.init() + return client +} + +// Tx returns an `ent.Tx` for mutations that were executed in transactions; +// it returns an error otherwise. +func (m ServerMetadataMutation) Tx() (*Tx, error) { + if _, ok := m.driver.(*txDriver); !ok { + return nil, errors.New("ent: mutation is not running in a transaction") + } + tx := &Tx{config: m.config} + tx.init() + return tx, nil +} + +// SetID sets the value of the id field. Note that this +// operation is only accepted on creation of ServerMetadata entities. +func (m *ServerMetadataMutation) SetID(id uuid.UUID) { + m.id = &id +} + +// ID returns the ID value in the mutation. 
Note that the ID is only available +// if it was provided to the builder or after it was returned from the database. +func (m *ServerMetadataMutation) ID() (id uuid.UUID, exists bool) { + if m.id == nil { + return + } + return *m.id, true +} + +// IDs queries the database and returns the entity ids that match the mutation's predicate. +// That means, if the mutation is applied within a transaction with an isolation level such +// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated +// or updated by the mutation. +func (m *ServerMetadataMutation) IDs(ctx context.Context) ([]uuid.UUID, error) { + switch { + case m.op.Is(OpUpdateOne | OpDeleteOne): + id, exists := m.ID() + if exists { + return []uuid.UUID{id}, nil + } + fallthrough + case m.op.Is(OpUpdate | OpDelete): + return m.Client().ServerMetadata.Query().Where(m.predicates...).IDs(ctx) + default: + return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op) + } +} + +// SetIsRemote sets the "isRemote" field. +func (m *ServerMetadataMutation) SetIsRemote(b bool) { + m.isRemote = &b +} + +// IsRemote returns the value of the "isRemote" field in the mutation. +func (m *ServerMetadataMutation) IsRemote() (r bool, exists bool) { + v := m.isRemote + if v == nil { + return + } + return *v, true +} + +// OldIsRemote returns the old "isRemote" field's value of the ServerMetadata entity. +// If the ServerMetadata object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ServerMetadataMutation) OldIsRemote(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIsRemote is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIsRemote requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIsRemote: %w", err) + } + return oldValue.IsRemote, nil +} + +// ResetIsRemote resets all changes to the "isRemote" field. +func (m *ServerMetadataMutation) ResetIsRemote() { + m.isRemote = nil +} + +// SetURI sets the "uri" field. +func (m *ServerMetadataMutation) SetURI(s string) { + m.uri = &s +} + +// URI returns the value of the "uri" field in the mutation. +func (m *ServerMetadataMutation) URI() (r string, exists bool) { + v := m.uri + if v == nil { + return + } + return *v, true +} + +// OldURI returns the old "uri" field's value of the ServerMetadata entity. +// If the ServerMetadata object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ServerMetadataMutation) OldURI(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldURI is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldURI requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldURI: %w", err) + } + return oldValue.URI, nil +} + +// ResetURI resets all changes to the "uri" field. +func (m *ServerMetadataMutation) ResetURI() { + m.uri = nil +} + +// SetExtensions sets the "extensions" field. +func (m *ServerMetadataMutation) SetExtensions(l lysand.Extensions) { + m.extensions = &l +} + +// Extensions returns the value of the "extensions" field in the mutation. 
+func (m *ServerMetadataMutation) Extensions() (r lysand.Extensions, exists bool) { + v := m.extensions + if v == nil { + return + } + return *v, true +} + +// OldExtensions returns the old "extensions" field's value of the ServerMetadata entity. +// If the ServerMetadata object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ServerMetadataMutation) OldExtensions(ctx context.Context) (v lysand.Extensions, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldExtensions is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldExtensions requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldExtensions: %w", err) + } + return oldValue.Extensions, nil +} + +// ResetExtensions resets all changes to the "extensions" field. +func (m *ServerMetadataMutation) ResetExtensions() { + m.extensions = nil +} + +// SetCreatedAt sets the "created_at" field. +func (m *ServerMetadataMutation) SetCreatedAt(t time.Time) { + m.created_at = &t +} + +// CreatedAt returns the value of the "created_at" field in the mutation. +func (m *ServerMetadataMutation) CreatedAt() (r time.Time, exists bool) { + v := m.created_at + if v == nil { + return + } + return *v, true +} + +// OldCreatedAt returns the old "created_at" field's value of the ServerMetadata entity. +// If the ServerMetadata object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ServerMetadataMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldCreatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err) + } + return oldValue.CreatedAt, nil +} + +// ResetCreatedAt resets all changes to the "created_at" field. +func (m *ServerMetadataMutation) ResetCreatedAt() { + m.created_at = nil +} + +// SetUpdatedAt sets the "updated_at" field. +func (m *ServerMetadataMutation) SetUpdatedAt(t time.Time) { + m.updated_at = &t +} + +// UpdatedAt returns the value of the "updated_at" field in the mutation. +func (m *ServerMetadataMutation) UpdatedAt() (r time.Time, exists bool) { + v := m.updated_at + if v == nil { + return + } + return *v, true +} + +// OldUpdatedAt returns the old "updated_at" field's value of the ServerMetadata entity. +// If the ServerMetadata object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ServerMetadataMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldUpdatedAt requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err) + } + return oldValue.UpdatedAt, nil +} + +// ResetUpdatedAt resets all changes to the "updated_at" field. +func (m *ServerMetadataMutation) ResetUpdatedAt() { + m.updated_at = nil +} + +// SetName sets the "name" field. 
+func (m *ServerMetadataMutation) SetName(s string) { + m.name = &s +} + +// Name returns the value of the "name" field in the mutation. +func (m *ServerMetadataMutation) Name() (r string, exists bool) { + v := m.name + if v == nil { + return + } + return *v, true +} + +// OldName returns the old "name" field's value of the ServerMetadata entity. +// If the ServerMetadata object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *ServerMetadataMutation) OldName(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldName is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldName requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldName: %w", err) + } + return oldValue.Name, nil +} + +// ResetName resets all changes to the "name" field. +func (m *ServerMetadataMutation) ResetName() { + m.name = nil +} + +// SetDescription sets the "description" field. +func (m *ServerMetadataMutation) SetDescription(s string) { + m.description = &s +} + +// Description returns the value of the "description" field in the mutation. +func (m *ServerMetadataMutation) Description() (r string, exists bool) { + v := m.description + if v == nil { + return + } + return *v, true +} + +// OldDescription returns the old "description" field's value of the ServerMetadata entity. +// If the ServerMetadata object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *ServerMetadataMutation) OldDescription(ctx context.Context) (v *string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldDescription is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldDescription requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldDescription: %w", err) + } + return oldValue.Description, nil +} + +// ClearDescription clears the value of the "description" field. +func (m *ServerMetadataMutation) ClearDescription() { + m.description = nil + m.clearedFields[servermetadata.FieldDescription] = struct{}{} +} + +// DescriptionCleared returns if the "description" field was cleared in this mutation. +func (m *ServerMetadataMutation) DescriptionCleared() bool { + _, ok := m.clearedFields[servermetadata.FieldDescription] + return ok +} + +// ResetDescription resets all changes to the "description" field. +func (m *ServerMetadataMutation) ResetDescription() { + m.description = nil + delete(m.clearedFields, servermetadata.FieldDescription) +} + +// SetVersion sets the "version" field. +func (m *ServerMetadataMutation) SetVersion(s string) { + m.version = &s +} + +// Version returns the value of the "version" field in the mutation. +func (m *ServerMetadataMutation) Version() (r string, exists bool) { + v := m.version + if v == nil { + return + } + return *v, true +} + +// OldVersion returns the old "version" field's value of the ServerMetadata entity. +// If the ServerMetadata object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
// OldVersion fetches the pre-mutation "version" value; only valid on
// UpdateOne operations where the entity's ID is known.
func (m *ServerMetadataMutation) OldVersion(ctx context.Context) (v string, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldVersion is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldVersion requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldVersion: %w", err)
	}
	return oldValue.Version, nil
}

// ResetVersion resets all changes to the "version" field.
func (m *ServerMetadataMutation) ResetVersion() {
	m.version = nil
}

// SetSupportedExtensions sets the "supportedExtensions" field.
// A full Set discards any pending Append values for the field.
func (m *ServerMetadataMutation) SetSupportedExtensions(s []string) {
	m.supportedExtensions = &s
	m.appendsupportedExtensions = nil
}

// SupportedExtensions returns the value of the "supportedExtensions" field in the mutation.
func (m *ServerMetadataMutation) SupportedExtensions() (r []string, exists bool) {
	v := m.supportedExtensions
	if v == nil {
		return
	}
	return *v, true
}

// OldSupportedExtensions returns the old "supportedExtensions" field's value of the ServerMetadata entity.
// If the ServerMetadata object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
// OldSupportedExtensions fetches the pre-mutation "supportedExtensions"
// value; only valid on UpdateOne operations where the entity's ID is known.
func (m *ServerMetadataMutation) OldSupportedExtensions(ctx context.Context) (v []string, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldSupportedExtensions is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldSupportedExtensions requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldSupportedExtensions: %w", err)
	}
	return oldValue.SupportedExtensions, nil
}

// AppendSupportedExtensions adds s to the "supportedExtensions" field.
// Appends accumulate separately from Set and are merged at apply time.
func (m *ServerMetadataMutation) AppendSupportedExtensions(s []string) {
	m.appendsupportedExtensions = append(m.appendsupportedExtensions, s...)
}

// AppendedSupportedExtensions returns the list of values that were appended to the "supportedExtensions" field in this mutation.
func (m *ServerMetadataMutation) AppendedSupportedExtensions() ([]string, bool) {
	if len(m.appendsupportedExtensions) == 0 {
		return nil, false
	}
	return m.appendsupportedExtensions, true
}

// ResetSupportedExtensions resets all changes to the "supportedExtensions" field.
func (m *ServerMetadataMutation) ResetSupportedExtensions() {
	m.supportedExtensions = nil
	m.appendsupportedExtensions = nil
}

// SetFollowerID sets the "follower" edge to the User entity by id.
func (m *ServerMetadataMutation) SetFollowerID(id uuid.UUID) {
	m.follower = &id
}

// ClearFollower clears the "follower" edge to the User entity.
func (m *ServerMetadataMutation) ClearFollower() {
	m.clearedfollower = true
}

// FollowerCleared reports if the "follower" edge to the User entity was cleared.
func (m *ServerMetadataMutation) FollowerCleared() bool {
	return m.clearedfollower
}

// FollowerID returns the "follower" edge ID in the mutation.
// FollowerID returns the "follower" edge ID set in this mutation, with a
// boolean reporting whether it was set at all.
func (m *ServerMetadataMutation) FollowerID() (id uuid.UUID, exists bool) {
	if m.follower != nil {
		return *m.follower, true
	}
	return
}

// FollowerIDs returns the "follower" edge IDs in the mutation.
// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
// FollowerID instead. It exists only for internal usage by the builders.
func (m *ServerMetadataMutation) FollowerIDs() (ids []uuid.UUID) {
	if id := m.follower; id != nil {
		ids = append(ids, *id)
	}
	return
}

// ResetFollower resets all changes to the "follower" edge.
func (m *ServerMetadataMutation) ResetFollower() {
	m.follower = nil
	m.clearedfollower = false
}

// SetFolloweeID sets the "followee" edge to the User entity by id.
func (m *ServerMetadataMutation) SetFolloweeID(id uuid.UUID) {
	m.followee = &id
}

// ClearFollowee clears the "followee" edge to the User entity.
func (m *ServerMetadataMutation) ClearFollowee() {
	m.clearedfollowee = true
}

// FolloweeCleared reports if the "followee" edge to the User entity was cleared.
func (m *ServerMetadataMutation) FolloweeCleared() bool {
	return m.clearedfollowee
}

// FolloweeID returns the "followee" edge ID in the mutation.
func (m *ServerMetadataMutation) FolloweeID() (id uuid.UUID, exists bool) {
	if m.followee != nil {
		return *m.followee, true
	}
	return
}

// FolloweeIDs returns the "followee" edge IDs in the mutation.
// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use
// FolloweeID instead. It exists only for internal usage by the builders.
func (m *ServerMetadataMutation) FolloweeIDs() (ids []uuid.UUID) {
	if id := m.followee; id != nil {
		ids = append(ids, *id)
	}
	return
}

// ResetFollowee resets all changes to the "followee" edge.
func (m *ServerMetadataMutation) ResetFollowee() {
	m.followee = nil
	m.clearedfollowee = false
}

// Where appends a list predicates to the ServerMetadataMutation builder.
// Where appends a list predicates to the ServerMetadataMutation builder.
func (m *ServerMetadataMutation) Where(ps ...predicate.ServerMetadata) {
	m.predicates = append(m.predicates, ps...)
}

// WhereP appends storage-level predicates to the ServerMetadataMutation builder. Using this method,
// users can use type-assertion to append predicates that do not depend on any generated package.
func (m *ServerMetadataMutation) WhereP(ps ...func(*sql.Selector)) {
	// predicate.ServerMetadata is itself a func(*sql.Selector), so each
	// raw selector-func converts directly.
	p := make([]predicate.ServerMetadata, len(ps))
	for i := range ps {
		p[i] = ps[i]
	}
	m.Where(p...)
}

// Op returns the operation name.
func (m *ServerMetadataMutation) Op() Op {
	return m.op
}

// SetOp allows setting the mutation operation.
func (m *ServerMetadataMutation) SetOp(op Op) {
	m.op = op
}

// Type returns the node type of this mutation (ServerMetadata).
func (m *ServerMetadataMutation) Type() string {
	return m.typ
}

// Fields returns all fields that were changed during this mutation. Note that in
// order to get all numeric fields that were incremented/decremented, call
// AddedFields().
func (m *ServerMetadataMutation) Fields() []string {
	// Capacity 9 = the total number of ServerMetadata schema fields.
	fields := make([]string, 0, 9)
	if m.isRemote != nil {
		fields = append(fields, servermetadata.FieldIsRemote)
	}
	if m.uri != nil {
		fields = append(fields, servermetadata.FieldURI)
	}
	if m.extensions != nil {
		fields = append(fields, servermetadata.FieldExtensions)
	}
	if m.created_at != nil {
		fields = append(fields, servermetadata.FieldCreatedAt)
	}
	if m.updated_at != nil {
		fields = append(fields, servermetadata.FieldUpdatedAt)
	}
	if m.name != nil {
		fields = append(fields, servermetadata.FieldName)
	}
	if m.description != nil {
		fields = append(fields, servermetadata.FieldDescription)
	}
	if m.version != nil {
		fields = append(fields, servermetadata.FieldVersion)
	}
	if m.supportedExtensions != nil {
		fields = append(fields, servermetadata.FieldSupportedExtensions)
	}
	return fields
}

// Field returns the value of a field with the given name. The second boolean
// return value indicates that this field was not set, or was not defined in the
// schema.
func (m *ServerMetadataMutation) Field(name string) (ent.Value, bool) {
	switch name {
	case servermetadata.FieldIsRemote:
		return m.IsRemote()
	case servermetadata.FieldURI:
		return m.URI()
	case servermetadata.FieldExtensions:
		return m.Extensions()
	case servermetadata.FieldCreatedAt:
		return m.CreatedAt()
	case servermetadata.FieldUpdatedAt:
		return m.UpdatedAt()
	case servermetadata.FieldName:
		return m.Name()
	case servermetadata.FieldDescription:
		return m.Description()
	case servermetadata.FieldVersion:
		return m.Version()
	case servermetadata.FieldSupportedExtensions:
		return m.SupportedExtensions()
	}
	return nil, false
}

// OldField returns the old value of the field from the database. An error is
// returned if the mutation operation is not UpdateOne, or the query to the
// database failed.
func (m *ServerMetadataMutation) OldField(ctx context.Context, name string) (ent.Value, error) {
	switch name {
	case servermetadata.FieldIsRemote:
		return m.OldIsRemote(ctx)
	case servermetadata.FieldURI:
		return m.OldURI(ctx)
	case servermetadata.FieldExtensions:
		return m.OldExtensions(ctx)
	case servermetadata.FieldCreatedAt:
		return m.OldCreatedAt(ctx)
	case servermetadata.FieldUpdatedAt:
		return m.OldUpdatedAt(ctx)
	case servermetadata.FieldName:
		return m.OldName(ctx)
	case servermetadata.FieldDescription:
		return m.OldDescription(ctx)
	case servermetadata.FieldVersion:
		return m.OldVersion(ctx)
	case servermetadata.FieldSupportedExtensions:
		return m.OldSupportedExtensions(ctx)
	}
	return nil, fmt.Errorf("unknown ServerMetadata field %s", name)
}

// SetField sets the value of a field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
// SetField dispatches an untyped ent.Value to the matching typed setter,
// rejecting values whose dynamic type does not match the schema field.
func (m *ServerMetadataMutation) SetField(name string, value ent.Value) error {
	switch name {
	case servermetadata.FieldIsRemote:
		v, ok := value.(bool)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetIsRemote(v)
		return nil
	case servermetadata.FieldURI:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetURI(v)
		return nil
	case servermetadata.FieldExtensions:
		v, ok := value.(lysand.Extensions)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetExtensions(v)
		return nil
	case servermetadata.FieldCreatedAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetCreatedAt(v)
		return nil
	case servermetadata.FieldUpdatedAt:
		v, ok := value.(time.Time)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetUpdatedAt(v)
		return nil
	case servermetadata.FieldName:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetName(v)
		return nil
	case servermetadata.FieldDescription:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetDescription(v)
		return nil
	case servermetadata.FieldVersion:
		v, ok := value.(string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetVersion(v)
		return nil
	case servermetadata.FieldSupportedExtensions:
		v, ok := value.([]string)
		if !ok {
			return fmt.Errorf("unexpected type %T for field %s", value, name)
		}
		m.SetSupportedExtensions(v)
		return nil
	}
	return fmt.Errorf("unknown ServerMetadata field %s", name)
}

// AddedFields returns all numeric fields that were incremented/decremented during
// this mutation.
// AddedFields returns nil: ServerMetadata declares no numeric fields, so
// there is nothing to increment or decrement.
func (m *ServerMetadataMutation) AddedFields() []string {
	return nil
}

// AddedField returns the numeric value that was incremented/decremented on a field
// with the given name. The second boolean return value indicates that this field
// was not set, or was not defined in the schema.
func (m *ServerMetadataMutation) AddedField(name string) (ent.Value, bool) {
	return nil, false
}

// AddField adds the value to the field with the given name. It returns an error if
// the field is not defined in the schema, or if the type mismatched the field
// type.
func (m *ServerMetadataMutation) AddField(name string, value ent.Value) error {
	// Empty switch: no numeric fields exist, so every name is unknown.
	switch name {
	}
	return fmt.Errorf("unknown ServerMetadata numeric field %s", name)
}

// ClearedFields returns all nullable fields that were cleared during this
// mutation.
func (m *ServerMetadataMutation) ClearedFields() []string {
	var fields []string
	if m.FieldCleared(servermetadata.FieldDescription) {
		fields = append(fields, servermetadata.FieldDescription)
	}
	return fields
}

// FieldCleared returns a boolean indicating if a field with the given name was
// cleared in this mutation.
func (m *ServerMetadataMutation) FieldCleared(name string) bool {
	_, ok := m.clearedFields[name]
	return ok
}

// ClearField clears the value of the field with the given name. It returns an
// error if the field is not defined in the schema.
func (m *ServerMetadataMutation) ClearField(name string) error {
	switch name {
	case servermetadata.FieldDescription:
		m.ClearDescription()
		return nil
	}
	return fmt.Errorf("unknown ServerMetadata nullable field %s", name)
}

// ResetField resets all changes in the mutation for the field with the given name.
// It returns an error if the field is not defined in the schema.
// ResetField dispatches to the per-field Reset method by schema field name.
func (m *ServerMetadataMutation) ResetField(name string) error {
	switch name {
	case servermetadata.FieldIsRemote:
		m.ResetIsRemote()
		return nil
	case servermetadata.FieldURI:
		m.ResetURI()
		return nil
	case servermetadata.FieldExtensions:
		m.ResetExtensions()
		return nil
	case servermetadata.FieldCreatedAt:
		m.ResetCreatedAt()
		return nil
	case servermetadata.FieldUpdatedAt:
		m.ResetUpdatedAt()
		return nil
	case servermetadata.FieldName:
		m.ResetName()
		return nil
	case servermetadata.FieldDescription:
		m.ResetDescription()
		return nil
	case servermetadata.FieldVersion:
		m.ResetVersion()
		return nil
	case servermetadata.FieldSupportedExtensions:
		m.ResetSupportedExtensions()
		return nil
	}
	return fmt.Errorf("unknown ServerMetadata field %s", name)
}

// AddedEdges returns all edge names that were set/added in this mutation.
func (m *ServerMetadataMutation) AddedEdges() []string {
	// Capacity 2 = the two unique edges: follower and followee.
	edges := make([]string, 0, 2)
	if m.follower != nil {
		edges = append(edges, servermetadata.EdgeFollower)
	}
	if m.followee != nil {
		edges = append(edges, servermetadata.EdgeFollowee)
	}
	return edges
}

// AddedIDs returns all IDs (to other nodes) that were added for the given edge
// name in this mutation.
func (m *ServerMetadataMutation) AddedIDs(name string) []ent.Value {
	switch name {
	case servermetadata.EdgeFollower:
		if id := m.follower; id != nil {
			return []ent.Value{*id}
		}
	case servermetadata.EdgeFollowee:
		if id := m.followee; id != nil {
			return []ent.Value{*id}
		}
	}
	return nil
}

// RemovedEdges returns all edge names that were removed in this mutation.
// Both edges are unique, so nothing is ever "removed" (only cleared).
func (m *ServerMetadataMutation) RemovedEdges() []string {
	edges := make([]string, 0, 2)
	return edges
}

// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with
// the given name in this mutation.
// RemovedIDs always returns nil: ServerMetadata has only unique edges,
// which are cleared rather than removed.
func (m *ServerMetadataMutation) RemovedIDs(name string) []ent.Value {
	return nil
}

// ClearedEdges returns all edge names that were cleared in this mutation.
func (m *ServerMetadataMutation) ClearedEdges() []string {
	edges := make([]string, 0, 2)
	if m.clearedfollower {
		edges = append(edges, servermetadata.EdgeFollower)
	}
	if m.clearedfollowee {
		edges = append(edges, servermetadata.EdgeFollowee)
	}
	return edges
}

// EdgeCleared returns a boolean which indicates if the edge with the given name
// was cleared in this mutation.
func (m *ServerMetadataMutation) EdgeCleared(name string) bool {
	switch name {
	case servermetadata.EdgeFollower:
		return m.clearedfollower
	case servermetadata.EdgeFollowee:
		return m.clearedfollowee
	}
	return false
}

// ClearEdge clears the value of the edge with the given name. It returns an error
// if that edge is not defined in the schema.
func (m *ServerMetadataMutation) ClearEdge(name string) error {
	switch name {
	case servermetadata.EdgeFollower:
		m.ClearFollower()
		return nil
	case servermetadata.EdgeFollowee:
		m.ClearFollowee()
		return nil
	}
	return fmt.Errorf("unknown ServerMetadata unique edge %s", name)
}

// ResetEdge resets all changes to the edge with the given name in this mutation.
// It returns an error if the edge is not defined in the schema.
func (m *ServerMetadataMutation) ResetEdge(name string) error {
	switch name {
	case servermetadata.EdgeFollower:
		m.ResetFollower()
		return nil
	case servermetadata.EdgeFollowee:
		m.ResetFollowee()
		return nil
	}
	return fmt.Errorf("unknown ServerMetadata edge %s", name)
}

// UserMutation represents an operation that mutates the User nodes in the graph.
// UserMutation records every pending change (fields, edges, clears) for a
// single create/update/delete of a User node before it is applied.
type UserMutation struct {
	config
	op  Op
	typ string
	id  *uuid.UUID
	// Scalar fields: pointer means "set in this mutation", nil means untouched.
	isRemote     *bool
	uri          *string
	extensions   *lysand.Extensions
	created_at   *time.Time
	updated_at   *time.Time
	username     *string
	passwordHash *[]byte
	displayName  *string
	biography    *string
	publicKey    *ed25519.PublicKey
	privateKey   *ed25519.PrivateKey
	indexable    *bool
	privacyLevel *user.PrivacyLevel
	// JSON list field plus its separately-tracked append values.
	fields       *[]lysand.Field
	appendfields []lysand.Field
	inbox        *string
	featured     *string
	followers    *string
	following    *string
	outbox       *string
	// Names of nullable fields explicitly cleared in this mutation.
	clearedFields map[string]struct{}
	// Unique edges (single ID) and their cleared flags.
	avatarImage        *int
	clearedavatarImage bool
	headerImage        *int
	clearedheaderImage bool
	// Non-unique edges: sets of added/removed note IDs plus cleared flags.
	authoredNotes         map[uuid.UUID]struct{}
	removedauthoredNotes  map[uuid.UUID]struct{}
	clearedauthoredNotes  bool
	mentionedNotes        map[uuid.UUID]struct{}
	removedmentionedNotes map[uuid.UUID]struct{}
	clearedmentionedNotes bool
	// done guards against querying old values after the mutation ran.
	done       bool
	oldValue   func(context.Context) (*User, error)
	predicates []predicate.User
}

var _ ent.Mutation = (*UserMutation)(nil)

// userOption allows management of the mutation configuration using functional options.
type userOption func(*UserMutation)

// newUserMutation creates new mutation for the User entity.
func newUserMutation(c config, op Op, opts ...userOption) *UserMutation {
	m := &UserMutation{
		config:        c,
		op:            op,
		typ:           TypeUser,
		clearedFields: make(map[string]struct{}),
	}
	for _, opt := range opts {
		opt(m)
	}
	return m
}

// withUserID sets the ID field of the mutation.
func withUserID(id uuid.UUID) userOption {
	return func(m *UserMutation) {
		var (
			err   error
			once  sync.Once
			value *User
		)
		// oldValue fetches the entity at most once and caches the result;
		// fetching is refused once the mutation has already been applied.
		m.oldValue = func(ctx context.Context) (*User, error) {
			once.Do(func() {
				if m.done {
					err = errors.New("querying old values post mutation is not allowed")
				} else {
					value, err = m.Client().User.Get(ctx, id)
				}
			})
			return value, err
		}
		m.id = &id
	}
}

// withUser sets the old User of the mutation.
// withUser sets the old User of the mutation from an already-loaded node,
// avoiding a database round-trip for old values.
func withUser(node *User) userOption {
	return func(m *UserMutation) {
		m.oldValue = func(context.Context) (*User, error) {
			return node, nil
		}
		m.id = &node.ID
	}
}

// Client returns a new `ent.Client` from the mutation. If the mutation was
// executed in a transaction (ent.Tx), a transactional client is returned.
func (m UserMutation) Client() *Client {
	client := &Client{config: m.config}
	client.init()
	return client
}

// Tx returns an `ent.Tx` for mutations that were executed in transactions;
// it returns an error otherwise.
func (m UserMutation) Tx() (*Tx, error) {
	if _, ok := m.driver.(*txDriver); !ok {
		return nil, errors.New("ent: mutation is not running in a transaction")
	}
	tx := &Tx{config: m.config}
	tx.init()
	return tx, nil
}

// SetID sets the value of the id field. Note that this
// operation is only accepted on creation of User entities.
func (m *UserMutation) SetID(id uuid.UUID) {
	m.id = &id
}

// ID returns the ID value in the mutation. Note that the ID is only available
// if it was provided to the builder or after it was returned from the database.
func (m *UserMutation) ID() (id uuid.UUID, exists bool) {
	if m.id == nil {
		return
	}
	return *m.id, true
}

// IDs queries the database and returns the entity ids that match the mutation's predicate.
// That means, if the mutation is applied within a transaction with an isolation level such
// as sql.LevelSerializable, the returned ids match the ids of the rows that will be updated
// or updated by the mutation.
// IDs resolves the target entity IDs: a known single ID for one-row ops,
// otherwise a predicate query for bulk update/delete.
func (m *UserMutation) IDs(ctx context.Context) ([]uuid.UUID, error) {
	switch {
	case m.op.Is(OpUpdateOne | OpDeleteOne):
		id, exists := m.ID()
		if exists {
			return []uuid.UUID{id}, nil
		}
		// No explicit ID yet: fall back to the predicate query below.
		fallthrough
	case m.op.Is(OpUpdate | OpDelete):
		return m.Client().User.Query().Where(m.predicates...).IDs(ctx)
	default:
		return nil, fmt.Errorf("IDs is not allowed on %s operations", m.op)
	}
}

// SetIsRemote sets the "isRemote" field.
func (m *UserMutation) SetIsRemote(b bool) {
	m.isRemote = &b
}

// IsRemote returns the value of the "isRemote" field in the mutation.
func (m *UserMutation) IsRemote() (r bool, exists bool) {
	v := m.isRemote
	if v == nil {
		return
	}
	return *v, true
}

// OldIsRemote returns the old "isRemote" field's value of the User entity.
// If the User object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *UserMutation) OldIsRemote(ctx context.Context) (v bool, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldIsRemote is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldIsRemote requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldIsRemote: %w", err)
	}
	return oldValue.IsRemote, nil
}

// ResetIsRemote resets all changes to the "isRemote" field.
func (m *UserMutation) ResetIsRemote() {
	m.isRemote = nil
}

// SetURI sets the "uri" field.
func (m *UserMutation) SetURI(s string) {
	m.uri = &s
}

// URI returns the value of the "uri" field in the mutation.
func (m *UserMutation) URI() (r string, exists bool) {
	v := m.uri
	if v == nil {
		return
	}
	return *v, true
}

// OldURI returns the old "uri" field's value of the User entity.
// If the User object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *UserMutation) OldURI(ctx context.Context) (v string, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldURI is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldURI requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldURI: %w", err)
	}
	return oldValue.URI, nil
}

// ResetURI resets all changes to the "uri" field.
func (m *UserMutation) ResetURI() {
	m.uri = nil
}

// SetExtensions sets the "extensions" field.
func (m *UserMutation) SetExtensions(l lysand.Extensions) {
	m.extensions = &l
}

// Extensions returns the value of the "extensions" field in the mutation.
func (m *UserMutation) Extensions() (r lysand.Extensions, exists bool) {
	v := m.extensions
	if v == nil {
		return
	}
	return *v, true
}

// OldExtensions returns the old "extensions" field's value of the User entity.
// If the User object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *UserMutation) OldExtensions(ctx context.Context) (v lysand.Extensions, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldExtensions is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldExtensions requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldExtensions: %w", err)
	}
	return oldValue.Extensions, nil
}

// ResetExtensions resets all changes to the "extensions" field.
// ResetExtensions resets all changes to the "extensions" field.
func (m *UserMutation) ResetExtensions() {
	m.extensions = nil
}

// SetCreatedAt sets the "created_at" field.
func (m *UserMutation) SetCreatedAt(t time.Time) {
	m.created_at = &t
}

// CreatedAt returns the value of the "created_at" field in the mutation.
func (m *UserMutation) CreatedAt() (r time.Time, exists bool) {
	v := m.created_at
	if v == nil {
		return
	}
	return *v, true
}

// OldCreatedAt returns the old "created_at" field's value of the User entity.
// If the User object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *UserMutation) OldCreatedAt(ctx context.Context) (v time.Time, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldCreatedAt is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldCreatedAt requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldCreatedAt: %w", err)
	}
	return oldValue.CreatedAt, nil
}

// ResetCreatedAt resets all changes to the "created_at" field.
func (m *UserMutation) ResetCreatedAt() {
	m.created_at = nil
}

// SetUpdatedAt sets the "updated_at" field.
func (m *UserMutation) SetUpdatedAt(t time.Time) {
	m.updated_at = &t
}

// UpdatedAt returns the value of the "updated_at" field in the mutation.
func (m *UserMutation) UpdatedAt() (r time.Time, exists bool) {
	v := m.updated_at
	if v == nil {
		return
	}
	return *v, true
}

// OldUpdatedAt returns the old "updated_at" field's value of the User entity.
// If the User object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
// OldUpdatedAt fetches the pre-mutation "updated_at" value; only valid on
// UpdateOne operations where the entity's ID is known.
func (m *UserMutation) OldUpdatedAt(ctx context.Context) (v time.Time, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldUpdatedAt is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldUpdatedAt requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldUpdatedAt: %w", err)
	}
	return oldValue.UpdatedAt, nil
}

// ResetUpdatedAt resets all changes to the "updated_at" field.
func (m *UserMutation) ResetUpdatedAt() {
	m.updated_at = nil
}

// SetUsername sets the "username" field.
func (m *UserMutation) SetUsername(s string) {
	m.username = &s
}

// Username returns the value of the "username" field in the mutation.
func (m *UserMutation) Username() (r string, exists bool) {
	v := m.username
	if v == nil {
		return
	}
	return *v, true
}

// OldUsername returns the old "username" field's value of the User entity.
// If the User object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *UserMutation) OldUsername(ctx context.Context) (v string, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldUsername is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldUsername requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldUsername: %w", err)
	}
	return oldValue.Username, nil
}

// ResetUsername resets all changes to the "username" field.
func (m *UserMutation) ResetUsername() {
	m.username = nil
}

// SetPasswordHash sets the "passwordHash" field.
// SetPasswordHash sets the "passwordHash" field.
func (m *UserMutation) SetPasswordHash(b []byte) {
	m.passwordHash = &b
}

// PasswordHash returns the value of the "passwordHash" field in the mutation.
func (m *UserMutation) PasswordHash() (r []byte, exists bool) {
	v := m.passwordHash
	if v == nil {
		return
	}
	return *v, true
}

// OldPasswordHash returns the old "passwordHash" field's value of the User entity.
// If the User object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
// The *[]byte return mirrors the nullable column: nil means stored NULL.
func (m *UserMutation) OldPasswordHash(ctx context.Context) (v *[]byte, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldPasswordHash is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldPasswordHash requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldPasswordHash: %w", err)
	}
	return oldValue.PasswordHash, nil
}

// ClearPasswordHash clears the value of the "passwordHash" field.
func (m *UserMutation) ClearPasswordHash() {
	m.passwordHash = nil
	m.clearedFields[user.FieldPasswordHash] = struct{}{}
}

// PasswordHashCleared returns if the "passwordHash" field was cleared in this mutation.
func (m *UserMutation) PasswordHashCleared() bool {
	_, ok := m.clearedFields[user.FieldPasswordHash]
	return ok
}

// ResetPasswordHash resets all changes to the "passwordHash" field.
func (m *UserMutation) ResetPasswordHash() {
	m.passwordHash = nil
	delete(m.clearedFields, user.FieldPasswordHash)
}

// SetDisplayName sets the "displayName" field.
func (m *UserMutation) SetDisplayName(s string) {
	m.displayName = &s
}

// DisplayName returns the value of the "displayName" field in the mutation.
// DisplayName returns the value of the "displayName" field in the mutation.
func (m *UserMutation) DisplayName() (r string, exists bool) {
	v := m.displayName
	if v == nil {
		return
	}
	return *v, true
}

// OldDisplayName returns the old "displayName" field's value of the User entity.
// If the User object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
// *string return: nil reports a stored NULL for this nullable field.
func (m *UserMutation) OldDisplayName(ctx context.Context) (v *string, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldDisplayName is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldDisplayName requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldDisplayName: %w", err)
	}
	return oldValue.DisplayName, nil
}

// ClearDisplayName clears the value of the "displayName" field.
func (m *UserMutation) ClearDisplayName() {
	m.displayName = nil
	m.clearedFields[user.FieldDisplayName] = struct{}{}
}

// DisplayNameCleared returns if the "displayName" field was cleared in this mutation.
func (m *UserMutation) DisplayNameCleared() bool {
	_, ok := m.clearedFields[user.FieldDisplayName]
	return ok
}

// ResetDisplayName resets all changes to the "displayName" field.
func (m *UserMutation) ResetDisplayName() {
	m.displayName = nil
	delete(m.clearedFields, user.FieldDisplayName)
}

// SetBiography sets the "biography" field.
func (m *UserMutation) SetBiography(s string) {
	m.biography = &s
}

// Biography returns the value of the "biography" field in the mutation.
func (m *UserMutation) Biography() (r string, exists bool) {
	v := m.biography
	if v == nil {
		return
	}
	return *v, true
}

// OldBiography returns the old "biography" field's value of the User entity.
// If the User object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
func (m *UserMutation) OldBiography(ctx context.Context) (v *string, err error) {
	if !m.op.Is(OpUpdateOne) {
		return v, errors.New("OldBiography is only allowed on UpdateOne operations")
	}
	if m.id == nil || m.oldValue == nil {
		return v, errors.New("OldBiography requires an ID field in the mutation")
	}
	oldValue, err := m.oldValue(ctx)
	if err != nil {
		return v, fmt.Errorf("querying old value for OldBiography: %w", err)
	}
	return oldValue.Biography, nil
}

// ClearBiography clears the value of the "biography" field.
// The clear is recorded in clearedFields so it survives until apply time.
func (m *UserMutation) ClearBiography() {
	m.biography = nil
	m.clearedFields[user.FieldBiography] = struct{}{}
}

// BiographyCleared returns if the "biography" field was cleared in this mutation.
func (m *UserMutation) BiographyCleared() bool {
	_, ok := m.clearedFields[user.FieldBiography]
	return ok
}

// ResetBiography resets all changes to the "biography" field.
func (m *UserMutation) ResetBiography() {
	m.biography = nil
	delete(m.clearedFields, user.FieldBiography)
}

// SetPublicKey sets the "publicKey" field.
func (m *UserMutation) SetPublicKey(ek ed25519.PublicKey) {
	m.publicKey = &ek
}

// PublicKey returns the value of the "publicKey" field in the mutation.
func (m *UserMutation) PublicKey() (r ed25519.PublicKey, exists bool) {
	v := m.publicKey
	if v == nil {
		return
	}
	return *v, true
}

// OldPublicKey returns the old "publicKey" field's value of the User entity.
// If the User object wasn't provided to the builder, the object is fetched from the database.
// An error is returned if the mutation operation is not UpdateOne, or the database query fails.
+func (m *UserMutation) OldPublicKey(ctx context.Context) (v ed25519.PublicKey, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPublicKey is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPublicKey requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPublicKey: %w", err) + } + return oldValue.PublicKey, nil +} + +// ResetPublicKey resets all changes to the "publicKey" field. +func (m *UserMutation) ResetPublicKey() { + m.publicKey = nil +} + +// SetPrivateKey sets the "privateKey" field. +func (m *UserMutation) SetPrivateKey(ek ed25519.PrivateKey) { + m.privateKey = &ek +} + +// PrivateKey returns the value of the "privateKey" field in the mutation. +func (m *UserMutation) PrivateKey() (r ed25519.PrivateKey, exists bool) { + v := m.privateKey + if v == nil { + return + } + return *v, true +} + +// OldPrivateKey returns the old "privateKey" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldPrivateKey(ctx context.Context) (v ed25519.PrivateKey, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPrivateKey is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPrivateKey requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPrivateKey: %w", err) + } + return oldValue.PrivateKey, nil +} + +// ClearPrivateKey clears the value of the "privateKey" field. 
+func (m *UserMutation) ClearPrivateKey() { + m.privateKey = nil + m.clearedFields[user.FieldPrivateKey] = struct{}{} +} + +// PrivateKeyCleared returns if the "privateKey" field was cleared in this mutation. +func (m *UserMutation) PrivateKeyCleared() bool { + _, ok := m.clearedFields[user.FieldPrivateKey] + return ok +} + +// ResetPrivateKey resets all changes to the "privateKey" field. +func (m *UserMutation) ResetPrivateKey() { + m.privateKey = nil + delete(m.clearedFields, user.FieldPrivateKey) +} + +// SetIndexable sets the "indexable" field. +func (m *UserMutation) SetIndexable(b bool) { + m.indexable = &b +} + +// Indexable returns the value of the "indexable" field in the mutation. +func (m *UserMutation) Indexable() (r bool, exists bool) { + v := m.indexable + if v == nil { + return + } + return *v, true +} + +// OldIndexable returns the old "indexable" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldIndexable(ctx context.Context) (v bool, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldIndexable is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldIndexable requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldIndexable: %w", err) + } + return oldValue.Indexable, nil +} + +// ResetIndexable resets all changes to the "indexable" field. +func (m *UserMutation) ResetIndexable() { + m.indexable = nil +} + +// SetPrivacyLevel sets the "privacyLevel" field. +func (m *UserMutation) SetPrivacyLevel(ul user.PrivacyLevel) { + m.privacyLevel = &ul +} + +// PrivacyLevel returns the value of the "privacyLevel" field in the mutation. 
+func (m *UserMutation) PrivacyLevel() (r user.PrivacyLevel, exists bool) { + v := m.privacyLevel + if v == nil { + return + } + return *v, true +} + +// OldPrivacyLevel returns the old "privacyLevel" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldPrivacyLevel(ctx context.Context) (v user.PrivacyLevel, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldPrivacyLevel is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldPrivacyLevel requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldPrivacyLevel: %w", err) + } + return oldValue.PrivacyLevel, nil +} + +// ResetPrivacyLevel resets all changes to the "privacyLevel" field. +func (m *UserMutation) ResetPrivacyLevel() { + m.privacyLevel = nil +} + +// SetFields sets the "fields" field. +func (m *UserMutation) SetFields(l []lysand.Field) { + m.fields = &l + m.appendfields = nil +} + +// GetFields returns the value of the "fields" field in the mutation. +func (m *UserMutation) GetFields() (r []lysand.Field, exists bool) { + v := m.fields + if v == nil { + return + } + return *v, true +} + +// OldFields returns the old "fields" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldFields(ctx context.Context) (v []lysand.Field, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFields is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFields requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFields: %w", err) + } + return oldValue.Fields, nil +} + +// AppendFields adds l to the "fields" field. +func (m *UserMutation) AppendFields(l []lysand.Field) { + m.appendfields = append(m.appendfields, l...) +} + +// AppendedFields returns the list of values that were appended to the "fields" field in this mutation. +func (m *UserMutation) AppendedFields() ([]lysand.Field, bool) { + if len(m.appendfields) == 0 { + return nil, false + } + return m.appendfields, true +} + +// ResetFields resets all changes to the "fields" field. +func (m *UserMutation) ResetFields() { + m.fields = nil + m.appendfields = nil +} + +// SetInbox sets the "inbox" field. +func (m *UserMutation) SetInbox(s string) { + m.inbox = &s +} + +// Inbox returns the value of the "inbox" field in the mutation. +func (m *UserMutation) Inbox() (r string, exists bool) { + v := m.inbox + if v == nil { + return + } + return *v, true +} + +// OldInbox returns the old "inbox" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldInbox(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldInbox is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldInbox requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldInbox: %w", err) + } + return oldValue.Inbox, nil +} + +// ResetInbox resets all changes to the "inbox" field. +func (m *UserMutation) ResetInbox() { + m.inbox = nil +} + +// SetFeatured sets the "featured" field. +func (m *UserMutation) SetFeatured(s string) { + m.featured = &s +} + +// Featured returns the value of the "featured" field in the mutation. +func (m *UserMutation) Featured() (r string, exists bool) { + v := m.featured + if v == nil { + return + } + return *v, true +} + +// OldFeatured returns the old "featured" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldFeatured(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFeatured is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFeatured requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFeatured: %w", err) + } + return oldValue.Featured, nil +} + +// ResetFeatured resets all changes to the "featured" field. +func (m *UserMutation) ResetFeatured() { + m.featured = nil +} + +// SetFollowers sets the "followers" field. +func (m *UserMutation) SetFollowers(s string) { + m.followers = &s +} + +// Followers returns the value of the "followers" field in the mutation. 
+func (m *UserMutation) Followers() (r string, exists bool) { + v := m.followers + if v == nil { + return + } + return *v, true +} + +// OldFollowers returns the old "followers" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldFollowers(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFollowers is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFollowers requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFollowers: %w", err) + } + return oldValue.Followers, nil +} + +// ResetFollowers resets all changes to the "followers" field. +func (m *UserMutation) ResetFollowers() { + m.followers = nil +} + +// SetFollowing sets the "following" field. +func (m *UserMutation) SetFollowing(s string) { + m.following = &s +} + +// Following returns the value of the "following" field in the mutation. +func (m *UserMutation) Following() (r string, exists bool) { + v := m.following + if v == nil { + return + } + return *v, true +} + +// OldFollowing returns the old "following" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. 
+func (m *UserMutation) OldFollowing(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldFollowing is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldFollowing requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldFollowing: %w", err) + } + return oldValue.Following, nil +} + +// ResetFollowing resets all changes to the "following" field. +func (m *UserMutation) ResetFollowing() { + m.following = nil +} + +// SetOutbox sets the "outbox" field. +func (m *UserMutation) SetOutbox(s string) { + m.outbox = &s +} + +// Outbox returns the value of the "outbox" field in the mutation. +func (m *UserMutation) Outbox() (r string, exists bool) { + v := m.outbox + if v == nil { + return + } + return *v, true +} + +// OldOutbox returns the old "outbox" field's value of the User entity. +// If the User object wasn't provided to the builder, the object is fetched from the database. +// An error is returned if the mutation operation is not UpdateOne, or the database query fails. +func (m *UserMutation) OldOutbox(ctx context.Context) (v string, err error) { + if !m.op.Is(OpUpdateOne) { + return v, errors.New("OldOutbox is only allowed on UpdateOne operations") + } + if m.id == nil || m.oldValue == nil { + return v, errors.New("OldOutbox requires an ID field in the mutation") + } + oldValue, err := m.oldValue(ctx) + if err != nil { + return v, fmt.Errorf("querying old value for OldOutbox: %w", err) + } + return oldValue.Outbox, nil +} + +// ResetOutbox resets all changes to the "outbox" field. +func (m *UserMutation) ResetOutbox() { + m.outbox = nil +} + +// SetAvatarImageID sets the "avatarImage" edge to the Image entity by id. +func (m *UserMutation) SetAvatarImageID(id int) { + m.avatarImage = &id +} + +// ClearAvatarImage clears the "avatarImage" edge to the Image entity. 
+func (m *UserMutation) ClearAvatarImage() { + m.clearedavatarImage = true +} + +// AvatarImageCleared reports if the "avatarImage" edge to the Image entity was cleared. +func (m *UserMutation) AvatarImageCleared() bool { + return m.clearedavatarImage +} + +// AvatarImageID returns the "avatarImage" edge ID in the mutation. +func (m *UserMutation) AvatarImageID() (id int, exists bool) { + if m.avatarImage != nil { + return *m.avatarImage, true + } + return +} + +// AvatarImageIDs returns the "avatarImage" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// AvatarImageID instead. It exists only for internal usage by the builders. +func (m *UserMutation) AvatarImageIDs() (ids []int) { + if id := m.avatarImage; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetAvatarImage resets all changes to the "avatarImage" edge. +func (m *UserMutation) ResetAvatarImage() { + m.avatarImage = nil + m.clearedavatarImage = false +} + +// SetHeaderImageID sets the "headerImage" edge to the Image entity by id. +func (m *UserMutation) SetHeaderImageID(id int) { + m.headerImage = &id +} + +// ClearHeaderImage clears the "headerImage" edge to the Image entity. +func (m *UserMutation) ClearHeaderImage() { + m.clearedheaderImage = true +} + +// HeaderImageCleared reports if the "headerImage" edge to the Image entity was cleared. +func (m *UserMutation) HeaderImageCleared() bool { + return m.clearedheaderImage +} + +// HeaderImageID returns the "headerImage" edge ID in the mutation. +func (m *UserMutation) HeaderImageID() (id int, exists bool) { + if m.headerImage != nil { + return *m.headerImage, true + } + return +} + +// HeaderImageIDs returns the "headerImage" edge IDs in the mutation. +// Note that IDs always returns len(IDs) <= 1 for unique edges, and you should use +// HeaderImageID instead. It exists only for internal usage by the builders. 
+func (m *UserMutation) HeaderImageIDs() (ids []int) { + if id := m.headerImage; id != nil { + ids = append(ids, *id) + } + return +} + +// ResetHeaderImage resets all changes to the "headerImage" edge. +func (m *UserMutation) ResetHeaderImage() { + m.headerImage = nil + m.clearedheaderImage = false +} + +// AddAuthoredNoteIDs adds the "authoredNotes" edge to the Note entity by ids. +func (m *UserMutation) AddAuthoredNoteIDs(ids ...uuid.UUID) { + if m.authoredNotes == nil { + m.authoredNotes = make(map[uuid.UUID]struct{}) + } + for i := range ids { + m.authoredNotes[ids[i]] = struct{}{} + } +} + +// ClearAuthoredNotes clears the "authoredNotes" edge to the Note entity. +func (m *UserMutation) ClearAuthoredNotes() { + m.clearedauthoredNotes = true +} + +// AuthoredNotesCleared reports if the "authoredNotes" edge to the Note entity was cleared. +func (m *UserMutation) AuthoredNotesCleared() bool { + return m.clearedauthoredNotes +} + +// RemoveAuthoredNoteIDs removes the "authoredNotes" edge to the Note entity by IDs. +func (m *UserMutation) RemoveAuthoredNoteIDs(ids ...uuid.UUID) { + if m.removedauthoredNotes == nil { + m.removedauthoredNotes = make(map[uuid.UUID]struct{}) + } + for i := range ids { + delete(m.authoredNotes, ids[i]) + m.removedauthoredNotes[ids[i]] = struct{}{} + } +} + +// RemovedAuthoredNotes returns the removed IDs of the "authoredNotes" edge to the Note entity. +func (m *UserMutation) RemovedAuthoredNotesIDs() (ids []uuid.UUID) { + for id := range m.removedauthoredNotes { + ids = append(ids, id) + } + return +} + +// AuthoredNotesIDs returns the "authoredNotes" edge IDs in the mutation. +func (m *UserMutation) AuthoredNotesIDs() (ids []uuid.UUID) { + for id := range m.authoredNotes { + ids = append(ids, id) + } + return +} + +// ResetAuthoredNotes resets all changes to the "authoredNotes" edge. 
+func (m *UserMutation) ResetAuthoredNotes() { + m.authoredNotes = nil + m.clearedauthoredNotes = false + m.removedauthoredNotes = nil +} + +// AddMentionedNoteIDs adds the "mentionedNotes" edge to the Note entity by ids. +func (m *UserMutation) AddMentionedNoteIDs(ids ...uuid.UUID) { + if m.mentionedNotes == nil { + m.mentionedNotes = make(map[uuid.UUID]struct{}) + } + for i := range ids { + m.mentionedNotes[ids[i]] = struct{}{} + } +} + +// ClearMentionedNotes clears the "mentionedNotes" edge to the Note entity. +func (m *UserMutation) ClearMentionedNotes() { + m.clearedmentionedNotes = true +} + +// MentionedNotesCleared reports if the "mentionedNotes" edge to the Note entity was cleared. +func (m *UserMutation) MentionedNotesCleared() bool { + return m.clearedmentionedNotes +} + +// RemoveMentionedNoteIDs removes the "mentionedNotes" edge to the Note entity by IDs. +func (m *UserMutation) RemoveMentionedNoteIDs(ids ...uuid.UUID) { + if m.removedmentionedNotes == nil { + m.removedmentionedNotes = make(map[uuid.UUID]struct{}) + } + for i := range ids { + delete(m.mentionedNotes, ids[i]) + m.removedmentionedNotes[ids[i]] = struct{}{} + } +} + +// RemovedMentionedNotes returns the removed IDs of the "mentionedNotes" edge to the Note entity. +func (m *UserMutation) RemovedMentionedNotesIDs() (ids []uuid.UUID) { + for id := range m.removedmentionedNotes { + ids = append(ids, id) + } + return +} + +// MentionedNotesIDs returns the "mentionedNotes" edge IDs in the mutation. +func (m *UserMutation) MentionedNotesIDs() (ids []uuid.UUID) { + for id := range m.mentionedNotes { + ids = append(ids, id) + } + return +} + +// ResetMentionedNotes resets all changes to the "mentionedNotes" edge. +func (m *UserMutation) ResetMentionedNotes() { + m.mentionedNotes = nil + m.clearedmentionedNotes = false + m.removedmentionedNotes = nil +} + +// Where appends a list predicates to the UserMutation builder. 
+func (m *UserMutation) Where(ps ...predicate.User) { + m.predicates = append(m.predicates, ps...) +} + +// WhereP appends storage-level predicates to the UserMutation builder. Using this method, +// users can use type-assertion to append predicates that do not depend on any generated package. +func (m *UserMutation) WhereP(ps ...func(*sql.Selector)) { + p := make([]predicate.User, len(ps)) + for i := range ps { + p[i] = ps[i] + } + m.Where(p...) +} + +// Op returns the operation name. +func (m *UserMutation) Op() Op { + return m.op +} + +// SetOp allows setting the mutation operation. +func (m *UserMutation) SetOp(op Op) { + m.op = op +} + +// Type returns the node type of this mutation (User). +func (m *UserMutation) Type() string { + return m.typ +} + +// Fields returns all fields that were changed during this mutation. Note that in +// order to get all numeric fields that were incremented/decremented, call +// AddedFields(). +func (m *UserMutation) Fields() []string { + fields := make([]string, 0, 19) + if m.isRemote != nil { + fields = append(fields, user.FieldIsRemote) + } + if m.uri != nil { + fields = append(fields, user.FieldURI) + } + if m.extensions != nil { + fields = append(fields, user.FieldExtensions) + } + if m.created_at != nil { + fields = append(fields, user.FieldCreatedAt) + } + if m.updated_at != nil { + fields = append(fields, user.FieldUpdatedAt) + } + if m.username != nil { + fields = append(fields, user.FieldUsername) + } + if m.passwordHash != nil { + fields = append(fields, user.FieldPasswordHash) + } + if m.displayName != nil { + fields = append(fields, user.FieldDisplayName) + } + if m.biography != nil { + fields = append(fields, user.FieldBiography) + } + if m.publicKey != nil { + fields = append(fields, user.FieldPublicKey) + } + if m.privateKey != nil { + fields = append(fields, user.FieldPrivateKey) + } + if m.indexable != nil { + fields = append(fields, user.FieldIndexable) + } + if m.privacyLevel != nil { + fields = append(fields, 
user.FieldPrivacyLevel) + } + if m.fields != nil { + fields = append(fields, user.FieldFields) + } + if m.inbox != nil { + fields = append(fields, user.FieldInbox) + } + if m.featured != nil { + fields = append(fields, user.FieldFeatured) + } + if m.followers != nil { + fields = append(fields, user.FieldFollowers) + } + if m.following != nil { + fields = append(fields, user.FieldFollowing) + } + if m.outbox != nil { + fields = append(fields, user.FieldOutbox) + } + return fields +} + +// Field returns the value of a field with the given name. The second boolean +// return value indicates that this field was not set, or was not defined in the +// schema. +func (m *UserMutation) Field(name string) (ent.Value, bool) { + switch name { + case user.FieldIsRemote: + return m.IsRemote() + case user.FieldURI: + return m.URI() + case user.FieldExtensions: + return m.Extensions() + case user.FieldCreatedAt: + return m.CreatedAt() + case user.FieldUpdatedAt: + return m.UpdatedAt() + case user.FieldUsername: + return m.Username() + case user.FieldPasswordHash: + return m.PasswordHash() + case user.FieldDisplayName: + return m.DisplayName() + case user.FieldBiography: + return m.Biography() + case user.FieldPublicKey: + return m.PublicKey() + case user.FieldPrivateKey: + return m.PrivateKey() + case user.FieldIndexable: + return m.Indexable() + case user.FieldPrivacyLevel: + return m.PrivacyLevel() + case user.FieldFields: + return m.GetFields() + case user.FieldInbox: + return m.Inbox() + case user.FieldFeatured: + return m.Featured() + case user.FieldFollowers: + return m.Followers() + case user.FieldFollowing: + return m.Following() + case user.FieldOutbox: + return m.Outbox() + } + return nil, false +} + +// OldField returns the old value of the field from the database. An error is +// returned if the mutation operation is not UpdateOne, or the query to the +// database failed. 
+func (m *UserMutation) OldField(ctx context.Context, name string) (ent.Value, error) { + switch name { + case user.FieldIsRemote: + return m.OldIsRemote(ctx) + case user.FieldURI: + return m.OldURI(ctx) + case user.FieldExtensions: + return m.OldExtensions(ctx) + case user.FieldCreatedAt: + return m.OldCreatedAt(ctx) + case user.FieldUpdatedAt: + return m.OldUpdatedAt(ctx) + case user.FieldUsername: + return m.OldUsername(ctx) + case user.FieldPasswordHash: + return m.OldPasswordHash(ctx) + case user.FieldDisplayName: + return m.OldDisplayName(ctx) + case user.FieldBiography: + return m.OldBiography(ctx) + case user.FieldPublicKey: + return m.OldPublicKey(ctx) + case user.FieldPrivateKey: + return m.OldPrivateKey(ctx) + case user.FieldIndexable: + return m.OldIndexable(ctx) + case user.FieldPrivacyLevel: + return m.OldPrivacyLevel(ctx) + case user.FieldFields: + return m.OldFields(ctx) + case user.FieldInbox: + return m.OldInbox(ctx) + case user.FieldFeatured: + return m.OldFeatured(ctx) + case user.FieldFollowers: + return m.OldFollowers(ctx) + case user.FieldFollowing: + return m.OldFollowing(ctx) + case user.FieldOutbox: + return m.OldOutbox(ctx) + } + return nil, fmt.Errorf("unknown User field %s", name) +} + +// SetField sets the value of a field with the given name. It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. 
+func (m *UserMutation) SetField(name string, value ent.Value) error { + switch name { + case user.FieldIsRemote: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIsRemote(v) + return nil + case user.FieldURI: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetURI(v) + return nil + case user.FieldExtensions: + v, ok := value.(lysand.Extensions) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetExtensions(v) + return nil + case user.FieldCreatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetCreatedAt(v) + return nil + case user.FieldUpdatedAt: + v, ok := value.(time.Time) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUpdatedAt(v) + return nil + case user.FieldUsername: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetUsername(v) + return nil + case user.FieldPasswordHash: + v, ok := value.([]byte) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPasswordHash(v) + return nil + case user.FieldDisplayName: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetDisplayName(v) + return nil + case user.FieldBiography: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetBiography(v) + return nil + case user.FieldPublicKey: + v, ok := value.(ed25519.PublicKey) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPublicKey(v) + return nil + case user.FieldPrivateKey: + v, ok := value.(ed25519.PrivateKey) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPrivateKey(v) + return nil + case 
user.FieldIndexable: + v, ok := value.(bool) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetIndexable(v) + return nil + case user.FieldPrivacyLevel: + v, ok := value.(user.PrivacyLevel) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetPrivacyLevel(v) + return nil + case user.FieldFields: + v, ok := value.([]lysand.Field) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFields(v) + return nil + case user.FieldInbox: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetInbox(v) + return nil + case user.FieldFeatured: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFeatured(v) + return nil + case user.FieldFollowers: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFollowers(v) + return nil + case user.FieldFollowing: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetFollowing(v) + return nil + case user.FieldOutbox: + v, ok := value.(string) + if !ok { + return fmt.Errorf("unexpected type %T for field %s", value, name) + } + m.SetOutbox(v) + return nil + } + return fmt.Errorf("unknown User field %s", name) +} + +// AddedFields returns all numeric fields that were incremented/decremented during +// this mutation. +func (m *UserMutation) AddedFields() []string { + return nil +} + +// AddedField returns the numeric value that was incremented/decremented on a field +// with the given name. The second boolean return value indicates that this field +// was not set, or was not defined in the schema. +func (m *UserMutation) AddedField(name string) (ent.Value, bool) { + return nil, false +} + +// AddField adds the value to the field with the given name. 
It returns an error if +// the field is not defined in the schema, or if the type mismatched the field +// type. +func (m *UserMutation) AddField(name string, value ent.Value) error { + switch name { + } + return fmt.Errorf("unknown User numeric field %s", name) +} + +// ClearedFields returns all nullable fields that were cleared during this +// mutation. +func (m *UserMutation) ClearedFields() []string { + var fields []string + if m.FieldCleared(user.FieldPasswordHash) { + fields = append(fields, user.FieldPasswordHash) + } + if m.FieldCleared(user.FieldDisplayName) { + fields = append(fields, user.FieldDisplayName) + } + if m.FieldCleared(user.FieldBiography) { + fields = append(fields, user.FieldBiography) + } + if m.FieldCleared(user.FieldPrivateKey) { + fields = append(fields, user.FieldPrivateKey) + } + return fields +} + +// FieldCleared returns a boolean indicating if a field with the given name was +// cleared in this mutation. +func (m *UserMutation) FieldCleared(name string) bool { + _, ok := m.clearedFields[name] + return ok +} + +// ClearField clears the value of the field with the given name. It returns an +// error if the field is not defined in the schema. +func (m *UserMutation) ClearField(name string) error { + switch name { + case user.FieldPasswordHash: + m.ClearPasswordHash() + return nil + case user.FieldDisplayName: + m.ClearDisplayName() + return nil + case user.FieldBiography: + m.ClearBiography() + return nil + case user.FieldPrivateKey: + m.ClearPrivateKey() + return nil + } + return fmt.Errorf("unknown User nullable field %s", name) +} + +// ResetField resets all changes in the mutation for the field with the given name. +// It returns an error if the field is not defined in the schema. 
+func (m *UserMutation) ResetField(name string) error { + switch name { + case user.FieldIsRemote: + m.ResetIsRemote() + return nil + case user.FieldURI: + m.ResetURI() + return nil + case user.FieldExtensions: + m.ResetExtensions() + return nil + case user.FieldCreatedAt: + m.ResetCreatedAt() + return nil + case user.FieldUpdatedAt: + m.ResetUpdatedAt() + return nil + case user.FieldUsername: + m.ResetUsername() + return nil + case user.FieldPasswordHash: + m.ResetPasswordHash() + return nil + case user.FieldDisplayName: + m.ResetDisplayName() + return nil + case user.FieldBiography: + m.ResetBiography() + return nil + case user.FieldPublicKey: + m.ResetPublicKey() + return nil + case user.FieldPrivateKey: + m.ResetPrivateKey() + return nil + case user.FieldIndexable: + m.ResetIndexable() + return nil + case user.FieldPrivacyLevel: + m.ResetPrivacyLevel() + return nil + case user.FieldFields: + m.ResetFields() + return nil + case user.FieldInbox: + m.ResetInbox() + return nil + case user.FieldFeatured: + m.ResetFeatured() + return nil + case user.FieldFollowers: + m.ResetFollowers() + return nil + case user.FieldFollowing: + m.ResetFollowing() + return nil + case user.FieldOutbox: + m.ResetOutbox() + return nil + } + return fmt.Errorf("unknown User field %s", name) +} + +// AddedEdges returns all edge names that were set/added in this mutation. +func (m *UserMutation) AddedEdges() []string { + edges := make([]string, 0, 4) + if m.avatarImage != nil { + edges = append(edges, user.EdgeAvatarImage) + } + if m.headerImage != nil { + edges = append(edges, user.EdgeHeaderImage) + } + if m.authoredNotes != nil { + edges = append(edges, user.EdgeAuthoredNotes) + } + if m.mentionedNotes != nil { + edges = append(edges, user.EdgeMentionedNotes) + } + return edges +} + +// AddedIDs returns all IDs (to other nodes) that were added for the given edge +// name in this mutation. 
+func (m *UserMutation) AddedIDs(name string) []ent.Value { + switch name { + case user.EdgeAvatarImage: + if id := m.avatarImage; id != nil { + return []ent.Value{*id} + } + case user.EdgeHeaderImage: + if id := m.headerImage; id != nil { + return []ent.Value{*id} + } + case user.EdgeAuthoredNotes: + ids := make([]ent.Value, 0, len(m.authoredNotes)) + for id := range m.authoredNotes { + ids = append(ids, id) + } + return ids + case user.EdgeMentionedNotes: + ids := make([]ent.Value, 0, len(m.mentionedNotes)) + for id := range m.mentionedNotes { + ids = append(ids, id) + } + return ids + } + return nil +} + +// RemovedEdges returns all edge names that were removed in this mutation. +func (m *UserMutation) RemovedEdges() []string { + edges := make([]string, 0, 4) + if m.removedauthoredNotes != nil { + edges = append(edges, user.EdgeAuthoredNotes) + } + if m.removedmentionedNotes != nil { + edges = append(edges, user.EdgeMentionedNotes) + } + return edges +} + +// RemovedIDs returns all IDs (to other nodes) that were removed for the edge with +// the given name in this mutation. +func (m *UserMutation) RemovedIDs(name string) []ent.Value { + switch name { + case user.EdgeAuthoredNotes: + ids := make([]ent.Value, 0, len(m.removedauthoredNotes)) + for id := range m.removedauthoredNotes { + ids = append(ids, id) + } + return ids + case user.EdgeMentionedNotes: + ids := make([]ent.Value, 0, len(m.removedmentionedNotes)) + for id := range m.removedmentionedNotes { + ids = append(ids, id) + } + return ids + } + return nil +} + +// ClearedEdges returns all edge names that were cleared in this mutation. 
+func (m *UserMutation) ClearedEdges() []string { + edges := make([]string, 0, 4) + if m.clearedavatarImage { + edges = append(edges, user.EdgeAvatarImage) + } + if m.clearedheaderImage { + edges = append(edges, user.EdgeHeaderImage) + } + if m.clearedauthoredNotes { + edges = append(edges, user.EdgeAuthoredNotes) + } + if m.clearedmentionedNotes { + edges = append(edges, user.EdgeMentionedNotes) + } + return edges +} + +// EdgeCleared returns a boolean which indicates if the edge with the given name +// was cleared in this mutation. +func (m *UserMutation) EdgeCleared(name string) bool { + switch name { + case user.EdgeAvatarImage: + return m.clearedavatarImage + case user.EdgeHeaderImage: + return m.clearedheaderImage + case user.EdgeAuthoredNotes: + return m.clearedauthoredNotes + case user.EdgeMentionedNotes: + return m.clearedmentionedNotes + } + return false +} + +// ClearEdge clears the value of the edge with the given name. It returns an error +// if that edge is not defined in the schema. +func (m *UserMutation) ClearEdge(name string) error { + switch name { + case user.EdgeAvatarImage: + m.ClearAvatarImage() + return nil + case user.EdgeHeaderImage: + m.ClearHeaderImage() + return nil + } + return fmt.Errorf("unknown User unique edge %s", name) +} + +// ResetEdge resets all changes to the edge with the given name in this mutation. +// It returns an error if the edge is not defined in the schema. +func (m *UserMutation) ResetEdge(name string) error { + switch name { + case user.EdgeAvatarImage: + m.ResetAvatarImage() + return nil + case user.EdgeHeaderImage: + m.ResetHeaderImage() + return nil + case user.EdgeAuthoredNotes: + m.ResetAuthoredNotes() + return nil + case user.EdgeMentionedNotes: + m.ResetMentionedNotes() + return nil + } + return fmt.Errorf("unknown User edge %s", name) +} diff --git a/ent/note.go b/ent/note.go new file mode 100644 index 0000000..205a305 --- /dev/null +++ b/ent/note.go @@ -0,0 +1,277 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/note" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// Note is the model entity for the Note schema. +type Note struct { + config `json:"-"` + // ID of the ent. + ID uuid.UUID `json:"id,omitempty"` + // IsRemote holds the value of the "isRemote" field. + IsRemote bool `json:"isRemote,omitempty"` + // URI holds the value of the "uri" field. + URI string `json:"uri,omitempty"` + // Extensions holds the value of the "extensions" field. + Extensions lysand.Extensions `json:"extensions,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Subject holds the value of the "subject" field. + Subject *string `json:"subject,omitempty"` + // Content holds the value of the "content" field. + Content string `json:"content,omitempty"` + // IsSensitive holds the value of the "isSensitive" field. + IsSensitive bool `json:"isSensitive,omitempty"` + // Visibility holds the value of the "visibility" field. + Visibility note.Visibility `json:"visibility,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the NoteQuery when eager-loading is set. + Edges NoteEdges `json:"edges"` + note_author *uuid.UUID + selectValues sql.SelectValues +} + +// NoteEdges holds the relations/edges for other nodes in the graph. +type NoteEdges struct { + // Author holds the value of the author edge. + Author *User `json:"author,omitempty"` + // Mentions holds the value of the mentions edge. + Mentions []*User `json:"mentions,omitempty"` + // Attachments holds the value of the attachments edge. 
+ Attachments []*Attachment `json:"attachments,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [3]bool +} + +// AuthorOrErr returns the Author value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e NoteEdges) AuthorOrErr() (*User, error) { + if e.Author != nil { + return e.Author, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: user.Label} + } + return nil, &NotLoadedError{edge: "author"} +} + +// MentionsOrErr returns the Mentions value or an error if the edge +// was not loaded in eager-loading. +func (e NoteEdges) MentionsOrErr() ([]*User, error) { + if e.loadedTypes[1] { + return e.Mentions, nil + } + return nil, &NotLoadedError{edge: "mentions"} +} + +// AttachmentsOrErr returns the Attachments value or an error if the edge +// was not loaded in eager-loading. +func (e NoteEdges) AttachmentsOrErr() ([]*Attachment, error) { + if e.loadedTypes[2] { + return e.Attachments, nil + } + return nil, &NotLoadedError{edge: "attachments"} +} + +// scanValues returns the types for scanning values from sql.Rows. 
+func (*Note) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case note.FieldExtensions: + values[i] = new([]byte) + case note.FieldIsRemote, note.FieldIsSensitive: + values[i] = new(sql.NullBool) + case note.FieldURI, note.FieldSubject, note.FieldContent, note.FieldVisibility: + values[i] = new(sql.NullString) + case note.FieldCreatedAt, note.FieldUpdatedAt: + values[i] = new(sql.NullTime) + case note.FieldID: + values[i] = new(uuid.UUID) + case note.ForeignKeys[0]: // note_author + values[i] = &sql.NullScanner{S: new(uuid.UUID)} + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the Note fields. +func (n *Note) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case note.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + n.ID = *value + } + case note.FieldIsRemote: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field isRemote", values[i]) + } else if value.Valid { + n.IsRemote = value.Bool + } + case note.FieldURI: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field uri", values[i]) + } else if value.Valid { + n.URI = value.String + } + case note.FieldExtensions: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field extensions", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &n.Extensions); err != nil { + return fmt.Errorf("unmarshal field extensions: %w", err) + } + } + case note.FieldCreatedAt: + if 
value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + n.CreatedAt = value.Time + } + case note.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + n.UpdatedAt = value.Time + } + case note.FieldSubject: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field subject", values[i]) + } else if value.Valid { + n.Subject = new(string) + *n.Subject = value.String + } + case note.FieldContent: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field content", values[i]) + } else if value.Valid { + n.Content = value.String + } + case note.FieldIsSensitive: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field isSensitive", values[i]) + } else if value.Valid { + n.IsSensitive = value.Bool + } + case note.FieldVisibility: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field visibility", values[i]) + } else if value.Valid { + n.Visibility = note.Visibility(value.String) + } + case note.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullScanner); !ok { + return fmt.Errorf("unexpected type %T for field note_author", values[i]) + } else if value.Valid { + n.note_author = new(uuid.UUID) + *n.note_author = *value.S.(*uuid.UUID) + } + default: + n.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the Note. +// This includes values selected through modifiers, order, etc. +func (n *Note) Value(name string) (ent.Value, error) { + return n.selectValues.Get(name) +} + +// QueryAuthor queries the "author" edge of the Note entity. 
+func (n *Note) QueryAuthor() *UserQuery { + return NewNoteClient(n.config).QueryAuthor(n) +} + +// QueryMentions queries the "mentions" edge of the Note entity. +func (n *Note) QueryMentions() *UserQuery { + return NewNoteClient(n.config).QueryMentions(n) +} + +// QueryAttachments queries the "attachments" edge of the Note entity. +func (n *Note) QueryAttachments() *AttachmentQuery { + return NewNoteClient(n.config).QueryAttachments(n) +} + +// Update returns a builder for updating this Note. +// Note that you need to call Note.Unwrap() before calling this method if this Note +// was returned from a transaction, and the transaction was committed or rolled back. +func (n *Note) Update() *NoteUpdateOne { + return NewNoteClient(n.config).UpdateOne(n) +} + +// Unwrap unwraps the Note entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (n *Note) Unwrap() *Note { + _tx, ok := n.config.driver.(*txDriver) + if !ok { + panic("ent: Note is not a transactional entity") + } + n.config.driver = _tx.drv + return n +} + +// String implements the fmt.Stringer. 
+func (n *Note) String() string { + var builder strings.Builder + builder.WriteString("Note(") + builder.WriteString(fmt.Sprintf("id=%v, ", n.ID)) + builder.WriteString("isRemote=") + builder.WriteString(fmt.Sprintf("%v", n.IsRemote)) + builder.WriteString(", ") + builder.WriteString("uri=") + builder.WriteString(n.URI) + builder.WriteString(", ") + builder.WriteString("extensions=") + builder.WriteString(fmt.Sprintf("%v", n.Extensions)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(n.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(n.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + if v := n.Subject; v != nil { + builder.WriteString("subject=") + builder.WriteString(*v) + } + builder.WriteString(", ") + builder.WriteString("content=") + builder.WriteString(n.Content) + builder.WriteString(", ") + builder.WriteString("isSensitive=") + builder.WriteString(fmt.Sprintf("%v", n.IsSensitive)) + builder.WriteString(", ") + builder.WriteString("visibility=") + builder.WriteString(fmt.Sprintf("%v", n.Visibility)) + builder.WriteByte(')') + return builder.String() +} + +// Notes is a parsable slice of Note. +type Notes []*Note diff --git a/ent/note/note.go b/ent/note/note.go new file mode 100644 index 0000000..ad65136 --- /dev/null +++ b/ent/note/note.go @@ -0,0 +1,257 @@ +// Code generated by ent, DO NOT EDIT. + +package note + +import ( + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +const ( + // Label holds the string label denoting the note type in the database. + Label = "note" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldIsRemote holds the string denoting the isremote field in the database. 
+ FieldIsRemote = "is_remote" + // FieldURI holds the string denoting the uri field in the database. + FieldURI = "uri" + // FieldExtensions holds the string denoting the extensions field in the database. + FieldExtensions = "extensions" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldSubject holds the string denoting the subject field in the database. + FieldSubject = "subject" + // FieldContent holds the string denoting the content field in the database. + FieldContent = "content" + // FieldIsSensitive holds the string denoting the issensitive field in the database. + FieldIsSensitive = "is_sensitive" + // FieldVisibility holds the string denoting the visibility field in the database. + FieldVisibility = "visibility" + // EdgeAuthor holds the string denoting the author edge name in mutations. + EdgeAuthor = "author" + // EdgeMentions holds the string denoting the mentions edge name in mutations. + EdgeMentions = "mentions" + // EdgeAttachments holds the string denoting the attachments edge name in mutations. + EdgeAttachments = "attachments" + // Table holds the table name of the note in the database. + Table = "notes" + // AuthorTable is the table that holds the author relation/edge. + AuthorTable = "notes" + // AuthorInverseTable is the table name for the User entity. + // It exists in this package in order to avoid circular dependency with the "user" package. + AuthorInverseTable = "users" + // AuthorColumn is the table column denoting the author relation/edge. + AuthorColumn = "note_author" + // MentionsTable is the table that holds the mentions relation/edge. The primary key declared below. + MentionsTable = "note_mentions" + // MentionsInverseTable is the table name for the User entity. 
+ // It exists in this package in order to avoid circular dependency with the "user" package. + MentionsInverseTable = "users" + // AttachmentsTable is the table that holds the attachments relation/edge. + AttachmentsTable = "attachments" + // AttachmentsInverseTable is the table name for the Attachment entity. + // It exists in this package in order to avoid circular dependency with the "attachment" package. + AttachmentsInverseTable = "attachments" + // AttachmentsColumn is the table column denoting the attachments relation/edge. + AttachmentsColumn = "note_attachments" +) + +// Columns holds all SQL columns for note fields. +var Columns = []string{ + FieldID, + FieldIsRemote, + FieldURI, + FieldExtensions, + FieldCreatedAt, + FieldUpdatedAt, + FieldSubject, + FieldContent, + FieldIsSensitive, + FieldVisibility, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "notes" +// table and are not defined as standalone fields in the schema. +var ForeignKeys = []string{ + "note_author", +} + +var ( + // MentionsPrimaryKey and MentionsColumn2 are the table columns denoting the + // primary key for the mentions relation (M2M). + MentionsPrimaryKey = []string{"note_id", "user_id"} +) + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // URIValidator is a validator for the "uri" field. It is called by the builders before save. + URIValidator func(string) error + // DefaultExtensions holds the default value on creation for the "extensions" field. + DefaultExtensions lysand.Extensions + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. 
+ DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // SubjectValidator is a validator for the "subject" field. It is called by the builders before save. + SubjectValidator func(string) error + // DefaultIsSensitive holds the default value on creation for the "isSensitive" field. + DefaultIsSensitive bool + // DefaultID holds the default value on creation for the "id" field. + DefaultID func() uuid.UUID +) + +// Visibility defines the type for the "visibility" enum field. +type Visibility string + +// VisibilityPublic is the default value of the Visibility enum. +const DefaultVisibility = VisibilityPublic + +// Visibility values. +const ( + VisibilityPublic Visibility = "public" + VisibilityUnlisted Visibility = "unlisted" + VisibilityFollowers Visibility = "followers" + VisibilityDirect Visibility = "direct" +) + +func (v Visibility) String() string { + return string(v) +} + +// VisibilityValidator is a validator for the "visibility" field enum values. It is called by the builders before save. +func VisibilityValidator(v Visibility) error { + switch v { + case VisibilityPublic, VisibilityUnlisted, VisibilityFollowers, VisibilityDirect: + return nil + default: + return fmt.Errorf("note: invalid enum value for visibility field: %q", v) + } +} + +// OrderOption defines the ordering options for the Note queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByIsRemote orders the results by the isRemote field. +func ByIsRemote(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIsRemote, opts...).ToFunc() +} + +// ByURI orders the results by the uri field. 
+func ByURI(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldURI, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// BySubject orders the results by the subject field. +func BySubject(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldSubject, opts...).ToFunc() +} + +// ByContent orders the results by the content field. +func ByContent(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldContent, opts...).ToFunc() +} + +// ByIsSensitive orders the results by the isSensitive field. +func ByIsSensitive(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIsSensitive, opts...).ToFunc() +} + +// ByVisibility orders the results by the visibility field. +func ByVisibility(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldVisibility, opts...).ToFunc() +} + +// ByAuthorField orders the results by author field. +func ByAuthorField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAuthorStep(), sql.OrderByField(field, opts...)) + } +} + +// ByMentionsCount orders the results by mentions count. +func ByMentionsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newMentionsStep(), opts...) + } +} + +// ByMentions orders the results by mentions terms. +func ByMentions(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newMentionsStep(), append([]sql.OrderTerm{term}, terms...)...) 
+ } +} + +// ByAttachmentsCount orders the results by attachments count. +func ByAttachmentsCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAttachmentsStep(), opts...) + } +} + +// ByAttachments orders the results by attachments terms. +func ByAttachments(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAttachmentsStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newAuthorStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AuthorInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AuthorTable, AuthorColumn), + ) +} +func newMentionsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(MentionsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, MentionsTable, MentionsPrimaryKey...), + ) +} +func newAttachmentsStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AttachmentsInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn), + ) +} diff --git a/ent/note/where.go b/ent/note/where.go new file mode 100644 index 0000000..d408711 --- /dev/null +++ b/ent/note/where.go @@ -0,0 +1,501 @@ +// Code generated by ent, DO NOT EDIT. + +package note + +import ( + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id uuid.UUID) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. 
+func IDNEQ(id uuid.UUID) predicate.Note { + return predicate.Note(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.Note { + return predicate.Note(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...uuid.UUID) predicate.Note { + return predicate.Note(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id uuid.UUID) predicate.Note { + return predicate.Note(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.Note { + return predicate.Note(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.Note { + return predicate.Note(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id uuid.UUID) predicate.Note { + return predicate.Note(sql.FieldLTE(FieldID, id)) +} + +// IsRemote applies equality check predicate on the "isRemote" field. It's identical to IsRemoteEQ. +func IsRemote(v bool) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldIsRemote, v)) +} + +// URI applies equality check predicate on the "uri" field. It's identical to URIEQ. +func URI(v string) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldURI, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// Subject applies equality check predicate on the "subject" field. It's identical to SubjectEQ. 
+func Subject(v string) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldSubject, v)) +} + +// Content applies equality check predicate on the "content" field. It's identical to ContentEQ. +func Content(v string) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldContent, v)) +} + +// IsSensitive applies equality check predicate on the "isSensitive" field. It's identical to IsSensitiveEQ. +func IsSensitive(v bool) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldIsSensitive, v)) +} + +// IsRemoteEQ applies the EQ predicate on the "isRemote" field. +func IsRemoteEQ(v bool) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldIsRemote, v)) +} + +// IsRemoteNEQ applies the NEQ predicate on the "isRemote" field. +func IsRemoteNEQ(v bool) predicate.Note { + return predicate.Note(sql.FieldNEQ(FieldIsRemote, v)) +} + +// URIEQ applies the EQ predicate on the "uri" field. +func URIEQ(v string) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldURI, v)) +} + +// URINEQ applies the NEQ predicate on the "uri" field. +func URINEQ(v string) predicate.Note { + return predicate.Note(sql.FieldNEQ(FieldURI, v)) +} + +// URIIn applies the In predicate on the "uri" field. +func URIIn(vs ...string) predicate.Note { + return predicate.Note(sql.FieldIn(FieldURI, vs...)) +} + +// URINotIn applies the NotIn predicate on the "uri" field. +func URINotIn(vs ...string) predicate.Note { + return predicate.Note(sql.FieldNotIn(FieldURI, vs...)) +} + +// URIGT applies the GT predicate on the "uri" field. +func URIGT(v string) predicate.Note { + return predicate.Note(sql.FieldGT(FieldURI, v)) +} + +// URIGTE applies the GTE predicate on the "uri" field. +func URIGTE(v string) predicate.Note { + return predicate.Note(sql.FieldGTE(FieldURI, v)) +} + +// URILT applies the LT predicate on the "uri" field. +func URILT(v string) predicate.Note { + return predicate.Note(sql.FieldLT(FieldURI, v)) +} + +// URILTE applies the LTE predicate on the "uri" field. 
+func URILTE(v string) predicate.Note { + return predicate.Note(sql.FieldLTE(FieldURI, v)) +} + +// URIContains applies the Contains predicate on the "uri" field. +func URIContains(v string) predicate.Note { + return predicate.Note(sql.FieldContains(FieldURI, v)) +} + +// URIHasPrefix applies the HasPrefix predicate on the "uri" field. +func URIHasPrefix(v string) predicate.Note { + return predicate.Note(sql.FieldHasPrefix(FieldURI, v)) +} + +// URIHasSuffix applies the HasSuffix predicate on the "uri" field. +func URIHasSuffix(v string) predicate.Note { + return predicate.Note(sql.FieldHasSuffix(FieldURI, v)) +} + +// URIEqualFold applies the EqualFold predicate on the "uri" field. +func URIEqualFold(v string) predicate.Note { + return predicate.Note(sql.FieldEqualFold(FieldURI, v)) +} + +// URIContainsFold applies the ContainsFold predicate on the "uri" field. +func URIContainsFold(v string) predicate.Note { + return predicate.Note(sql.FieldContainsFold(FieldURI, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.Note { + return predicate.Note(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.Note { + return predicate.Note(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.Note { + return predicate.Note(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.Note { + return predicate.Note(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. 
+func CreatedAtGTE(v time.Time) predicate.Note { + return predicate.Note(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.Note { + return predicate.Note(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.Note { + return predicate.Note(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.Note { + return predicate.Note(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.Note { + return predicate.Note(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.Note { + return predicate.Note(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.Note { + return predicate.Note(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.Note { + return predicate.Note(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.Note { + return predicate.Note(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.Note { + return predicate.Note(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// SubjectEQ applies the EQ predicate on the "subject" field. 
+func SubjectEQ(v string) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldSubject, v)) +} + +// SubjectNEQ applies the NEQ predicate on the "subject" field. +func SubjectNEQ(v string) predicate.Note { + return predicate.Note(sql.FieldNEQ(FieldSubject, v)) +} + +// SubjectIn applies the In predicate on the "subject" field. +func SubjectIn(vs ...string) predicate.Note { + return predicate.Note(sql.FieldIn(FieldSubject, vs...)) +} + +// SubjectNotIn applies the NotIn predicate on the "subject" field. +func SubjectNotIn(vs ...string) predicate.Note { + return predicate.Note(sql.FieldNotIn(FieldSubject, vs...)) +} + +// SubjectGT applies the GT predicate on the "subject" field. +func SubjectGT(v string) predicate.Note { + return predicate.Note(sql.FieldGT(FieldSubject, v)) +} + +// SubjectGTE applies the GTE predicate on the "subject" field. +func SubjectGTE(v string) predicate.Note { + return predicate.Note(sql.FieldGTE(FieldSubject, v)) +} + +// SubjectLT applies the LT predicate on the "subject" field. +func SubjectLT(v string) predicate.Note { + return predicate.Note(sql.FieldLT(FieldSubject, v)) +} + +// SubjectLTE applies the LTE predicate on the "subject" field. +func SubjectLTE(v string) predicate.Note { + return predicate.Note(sql.FieldLTE(FieldSubject, v)) +} + +// SubjectContains applies the Contains predicate on the "subject" field. +func SubjectContains(v string) predicate.Note { + return predicate.Note(sql.FieldContains(FieldSubject, v)) +} + +// SubjectHasPrefix applies the HasPrefix predicate on the "subject" field. +func SubjectHasPrefix(v string) predicate.Note { + return predicate.Note(sql.FieldHasPrefix(FieldSubject, v)) +} + +// SubjectHasSuffix applies the HasSuffix predicate on the "subject" field. +func SubjectHasSuffix(v string) predicate.Note { + return predicate.Note(sql.FieldHasSuffix(FieldSubject, v)) +} + +// SubjectIsNil applies the IsNil predicate on the "subject" field. 
+func SubjectIsNil() predicate.Note { + return predicate.Note(sql.FieldIsNull(FieldSubject)) +} + +// SubjectNotNil applies the NotNil predicate on the "subject" field. +func SubjectNotNil() predicate.Note { + return predicate.Note(sql.FieldNotNull(FieldSubject)) +} + +// SubjectEqualFold applies the EqualFold predicate on the "subject" field. +func SubjectEqualFold(v string) predicate.Note { + return predicate.Note(sql.FieldEqualFold(FieldSubject, v)) +} + +// SubjectContainsFold applies the ContainsFold predicate on the "subject" field. +func SubjectContainsFold(v string) predicate.Note { + return predicate.Note(sql.FieldContainsFold(FieldSubject, v)) +} + +// ContentEQ applies the EQ predicate on the "content" field. +func ContentEQ(v string) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldContent, v)) +} + +// ContentNEQ applies the NEQ predicate on the "content" field. +func ContentNEQ(v string) predicate.Note { + return predicate.Note(sql.FieldNEQ(FieldContent, v)) +} + +// ContentIn applies the In predicate on the "content" field. +func ContentIn(vs ...string) predicate.Note { + return predicate.Note(sql.FieldIn(FieldContent, vs...)) +} + +// ContentNotIn applies the NotIn predicate on the "content" field. +func ContentNotIn(vs ...string) predicate.Note { + return predicate.Note(sql.FieldNotIn(FieldContent, vs...)) +} + +// ContentGT applies the GT predicate on the "content" field. +func ContentGT(v string) predicate.Note { + return predicate.Note(sql.FieldGT(FieldContent, v)) +} + +// ContentGTE applies the GTE predicate on the "content" field. +func ContentGTE(v string) predicate.Note { + return predicate.Note(sql.FieldGTE(FieldContent, v)) +} + +// ContentLT applies the LT predicate on the "content" field. +func ContentLT(v string) predicate.Note { + return predicate.Note(sql.FieldLT(FieldContent, v)) +} + +// ContentLTE applies the LTE predicate on the "content" field. 
+func ContentLTE(v string) predicate.Note { + return predicate.Note(sql.FieldLTE(FieldContent, v)) +} + +// ContentContains applies the Contains predicate on the "content" field. +func ContentContains(v string) predicate.Note { + return predicate.Note(sql.FieldContains(FieldContent, v)) +} + +// ContentHasPrefix applies the HasPrefix predicate on the "content" field. +func ContentHasPrefix(v string) predicate.Note { + return predicate.Note(sql.FieldHasPrefix(FieldContent, v)) +} + +// ContentHasSuffix applies the HasSuffix predicate on the "content" field. +func ContentHasSuffix(v string) predicate.Note { + return predicate.Note(sql.FieldHasSuffix(FieldContent, v)) +} + +// ContentEqualFold applies the EqualFold predicate on the "content" field. +func ContentEqualFold(v string) predicate.Note { + return predicate.Note(sql.FieldEqualFold(FieldContent, v)) +} + +// ContentContainsFold applies the ContainsFold predicate on the "content" field. +func ContentContainsFold(v string) predicate.Note { + return predicate.Note(sql.FieldContainsFold(FieldContent, v)) +} + +// IsSensitiveEQ applies the EQ predicate on the "isSensitive" field. +func IsSensitiveEQ(v bool) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldIsSensitive, v)) +} + +// IsSensitiveNEQ applies the NEQ predicate on the "isSensitive" field. +func IsSensitiveNEQ(v bool) predicate.Note { + return predicate.Note(sql.FieldNEQ(FieldIsSensitive, v)) +} + +// VisibilityEQ applies the EQ predicate on the "visibility" field. +func VisibilityEQ(v Visibility) predicate.Note { + return predicate.Note(sql.FieldEQ(FieldVisibility, v)) +} + +// VisibilityNEQ applies the NEQ predicate on the "visibility" field. +func VisibilityNEQ(v Visibility) predicate.Note { + return predicate.Note(sql.FieldNEQ(FieldVisibility, v)) +} + +// VisibilityIn applies the In predicate on the "visibility" field. 
+func VisibilityIn(vs ...Visibility) predicate.Note { + return predicate.Note(sql.FieldIn(FieldVisibility, vs...)) +} + +// VisibilityNotIn applies the NotIn predicate on the "visibility" field. +func VisibilityNotIn(vs ...Visibility) predicate.Note { + return predicate.Note(sql.FieldNotIn(FieldVisibility, vs...)) +} + +// HasAuthor applies the HasEdge predicate on the "author" edge. +func HasAuthor() predicate.Note { + return predicate.Note(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AuthorTable, AuthorColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAuthorWith applies the HasEdge predicate on the "author" edge with a given conditions (other predicates). +func HasAuthorWith(preds ...predicate.User) predicate.Note { + return predicate.Note(func(s *sql.Selector) { + step := newAuthorStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasMentions applies the HasEdge predicate on the "mentions" edge. +func HasMentions() predicate.Note { + return predicate.Note(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, MentionsTable, MentionsPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasMentionsWith applies the HasEdge predicate on the "mentions" edge with a given conditions (other predicates). +func HasMentionsWith(preds ...predicate.User) predicate.Note { + return predicate.Note(func(s *sql.Selector) { + step := newMentionsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAttachments applies the HasEdge predicate on the "attachments" edge. 
+func HasAttachments() predicate.Note { + return predicate.Note(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, AttachmentsTable, AttachmentsColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAttachmentsWith applies the HasEdge predicate on the "attachments" edge with a given conditions (other predicates). +func HasAttachmentsWith(preds ...predicate.Attachment) predicate.Note { + return predicate.Note(func(s *sql.Selector) { + step := newAttachmentsStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.Note) predicate.Note { + return predicate.Note(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.Note) predicate.Note { + return predicate.Note(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.Note) predicate.Note { + return predicate.Note(sql.NotPredicates(p)) +} diff --git a/ent/note_create.go b/ent/note_create.go new file mode 100644 index 0000000..0f0bf25 --- /dev/null +++ b/ent/note_create.go @@ -0,0 +1,1087 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/attachment" + "github.com/lysand-org/versia-go/ent/note" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// NoteCreate is the builder for creating a Note entity. +type NoteCreate struct { + config + mutation *NoteMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetIsRemote sets the "isRemote" field. 
+func (nc *NoteCreate) SetIsRemote(b bool) *NoteCreate { + nc.mutation.SetIsRemote(b) + return nc +} + +// SetURI sets the "uri" field. +func (nc *NoteCreate) SetURI(s string) *NoteCreate { + nc.mutation.SetURI(s) + return nc +} + +// SetExtensions sets the "extensions" field. +func (nc *NoteCreate) SetExtensions(l lysand.Extensions) *NoteCreate { + nc.mutation.SetExtensions(l) + return nc +} + +// SetCreatedAt sets the "created_at" field. +func (nc *NoteCreate) SetCreatedAt(t time.Time) *NoteCreate { + nc.mutation.SetCreatedAt(t) + return nc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (nc *NoteCreate) SetNillableCreatedAt(t *time.Time) *NoteCreate { + if t != nil { + nc.SetCreatedAt(*t) + } + return nc +} + +// SetUpdatedAt sets the "updated_at" field. +func (nc *NoteCreate) SetUpdatedAt(t time.Time) *NoteCreate { + nc.mutation.SetUpdatedAt(t) + return nc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (nc *NoteCreate) SetNillableUpdatedAt(t *time.Time) *NoteCreate { + if t != nil { + nc.SetUpdatedAt(*t) + } + return nc +} + +// SetSubject sets the "subject" field. +func (nc *NoteCreate) SetSubject(s string) *NoteCreate { + nc.mutation.SetSubject(s) + return nc +} + +// SetNillableSubject sets the "subject" field if the given value is not nil. +func (nc *NoteCreate) SetNillableSubject(s *string) *NoteCreate { + if s != nil { + nc.SetSubject(*s) + } + return nc +} + +// SetContent sets the "content" field. +func (nc *NoteCreate) SetContent(s string) *NoteCreate { + nc.mutation.SetContent(s) + return nc +} + +// SetIsSensitive sets the "isSensitive" field. +func (nc *NoteCreate) SetIsSensitive(b bool) *NoteCreate { + nc.mutation.SetIsSensitive(b) + return nc +} + +// SetNillableIsSensitive sets the "isSensitive" field if the given value is not nil. 
+func (nc *NoteCreate) SetNillableIsSensitive(b *bool) *NoteCreate { + if b != nil { + nc.SetIsSensitive(*b) + } + return nc +} + +// SetVisibility sets the "visibility" field. +func (nc *NoteCreate) SetVisibility(n note.Visibility) *NoteCreate { + nc.mutation.SetVisibility(n) + return nc +} + +// SetNillableVisibility sets the "visibility" field if the given value is not nil. +func (nc *NoteCreate) SetNillableVisibility(n *note.Visibility) *NoteCreate { + if n != nil { + nc.SetVisibility(*n) + } + return nc +} + +// SetID sets the "id" field. +func (nc *NoteCreate) SetID(u uuid.UUID) *NoteCreate { + nc.mutation.SetID(u) + return nc +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (nc *NoteCreate) SetNillableID(u *uuid.UUID) *NoteCreate { + if u != nil { + nc.SetID(*u) + } + return nc +} + +// SetAuthorID sets the "author" edge to the User entity by ID. +func (nc *NoteCreate) SetAuthorID(id uuid.UUID) *NoteCreate { + nc.mutation.SetAuthorID(id) + return nc +} + +// SetAuthor sets the "author" edge to the User entity. +func (nc *NoteCreate) SetAuthor(u *User) *NoteCreate { + return nc.SetAuthorID(u.ID) +} + +// AddMentionIDs adds the "mentions" edge to the User entity by IDs. +func (nc *NoteCreate) AddMentionIDs(ids ...uuid.UUID) *NoteCreate { + nc.mutation.AddMentionIDs(ids...) + return nc +} + +// AddMentions adds the "mentions" edges to the User entity. +func (nc *NoteCreate) AddMentions(u ...*User) *NoteCreate { + ids := make([]uuid.UUID, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return nc.AddMentionIDs(ids...) +} + +// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs. +func (nc *NoteCreate) AddAttachmentIDs(ids ...uuid.UUID) *NoteCreate { + nc.mutation.AddAttachmentIDs(ids...) + return nc +} + +// AddAttachments adds the "attachments" edges to the Attachment entity. 
+func (nc *NoteCreate) AddAttachments(a ...*Attachment) *NoteCreate { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return nc.AddAttachmentIDs(ids...) +} + +// Mutation returns the NoteMutation object of the builder. +func (nc *NoteCreate) Mutation() *NoteMutation { + return nc.mutation +} + +// Save creates the Note in the database. +func (nc *NoteCreate) Save(ctx context.Context) (*Note, error) { + nc.defaults() + return withHooks(ctx, nc.sqlSave, nc.mutation, nc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (nc *NoteCreate) SaveX(ctx context.Context) *Note { + v, err := nc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (nc *NoteCreate) Exec(ctx context.Context) error { + _, err := nc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (nc *NoteCreate) ExecX(ctx context.Context) { + if err := nc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (nc *NoteCreate) defaults() { + if _, ok := nc.mutation.Extensions(); !ok { + v := note.DefaultExtensions + nc.mutation.SetExtensions(v) + } + if _, ok := nc.mutation.CreatedAt(); !ok { + v := note.DefaultCreatedAt() + nc.mutation.SetCreatedAt(v) + } + if _, ok := nc.mutation.UpdatedAt(); !ok { + v := note.DefaultUpdatedAt() + nc.mutation.SetUpdatedAt(v) + } + if _, ok := nc.mutation.IsSensitive(); !ok { + v := note.DefaultIsSensitive + nc.mutation.SetIsSensitive(v) + } + if _, ok := nc.mutation.Visibility(); !ok { + v := note.DefaultVisibility + nc.mutation.SetVisibility(v) + } + if _, ok := nc.mutation.ID(); !ok { + v := note.DefaultID() + nc.mutation.SetID(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (nc *NoteCreate) check() error { + if _, ok := nc.mutation.IsRemote(); !ok { + return &ValidationError{Name: "isRemote", err: errors.New(`ent: missing required field "Note.isRemote"`)} + } + if _, ok := nc.mutation.URI(); !ok { + return &ValidationError{Name: "uri", err: errors.New(`ent: missing required field "Note.uri"`)} + } + if v, ok := nc.mutation.URI(); ok { + if err := note.URIValidator(v); err != nil { + return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "Note.uri": %w`, err)} + } + } + if _, ok := nc.mutation.Extensions(); !ok { + return &ValidationError{Name: "extensions", err: errors.New(`ent: missing required field "Note.extensions"`)} + } + if _, ok := nc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "Note.created_at"`)} + } + if _, ok := nc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "Note.updated_at"`)} + } + if v, ok := nc.mutation.Subject(); ok { + if err := note.SubjectValidator(v); err != nil { + return &ValidationError{Name: "subject", err: fmt.Errorf(`ent: validator failed for field "Note.subject": %w`, err)} + } + } + if _, ok := nc.mutation.Content(); !ok { + return &ValidationError{Name: "content", err: errors.New(`ent: missing required field "Note.content"`)} + } + if _, ok := nc.mutation.IsSensitive(); !ok { + return &ValidationError{Name: "isSensitive", err: errors.New(`ent: missing required field "Note.isSensitive"`)} + } + if _, ok := nc.mutation.Visibility(); !ok { + return &ValidationError{Name: "visibility", err: errors.New(`ent: missing required field "Note.visibility"`)} + } + if v, ok := nc.mutation.Visibility(); ok { + if err := note.VisibilityValidator(v); err != nil { + return &ValidationError{Name: "visibility", err: fmt.Errorf(`ent: validator failed for field "Note.visibility": %w`, err)} + } + } + if _, ok := nc.mutation.AuthorID(); !ok { 
+ return &ValidationError{Name: "author", err: errors.New(`ent: missing required edge "Note.author"`)} + } + return nil +} + +func (nc *NoteCreate) sqlSave(ctx context.Context) (*Note, error) { + if err := nc.check(); err != nil { + return nil, err + } + _node, _spec := nc.createSpec() + if err := sqlgraph.CreateNode(ctx, nc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + nc.mutation.id = &_node.ID + nc.mutation.done = true + return _node, nil +} + +func (nc *NoteCreate) createSpec() (*Note, *sqlgraph.CreateSpec) { + var ( + _node = &Note{config: nc.config} + _spec = sqlgraph.NewCreateSpec(note.Table, sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID)) + ) + _spec.OnConflict = nc.conflict + if id, ok := nc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = &id + } + if value, ok := nc.mutation.IsRemote(); ok { + _spec.SetField(note.FieldIsRemote, field.TypeBool, value) + _node.IsRemote = value + } + if value, ok := nc.mutation.URI(); ok { + _spec.SetField(note.FieldURI, field.TypeString, value) + _node.URI = value + } + if value, ok := nc.mutation.Extensions(); ok { + _spec.SetField(note.FieldExtensions, field.TypeJSON, value) + _node.Extensions = value + } + if value, ok := nc.mutation.CreatedAt(); ok { + _spec.SetField(note.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := nc.mutation.UpdatedAt(); ok { + _spec.SetField(note.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := nc.mutation.Subject(); ok { + _spec.SetField(note.FieldSubject, field.TypeString, value) + _node.Subject = &value + } + if value, ok := nc.mutation.Content(); ok { + _spec.SetField(note.FieldContent, field.TypeString, value) + 
_node.Content = value + } + if value, ok := nc.mutation.IsSensitive(); ok { + _spec.SetField(note.FieldIsSensitive, field.TypeBool, value) + _node.IsSensitive = value + } + if value, ok := nc.mutation.Visibility(); ok { + _spec.SetField(note.FieldVisibility, field.TypeEnum, value) + _node.Visibility = value + } + if nodes := nc.mutation.AuthorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: note.AuthorTable, + Columns: []string{note.AuthorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.note_author = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := nc.mutation.MentionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: note.MentionsTable, + Columns: note.MentionsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := nc.mutation.AttachmentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: note.AttachmentsTable, + Columns: []string{note.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Note.Create(). +// SetIsRemote(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. 
+// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.NoteUpsert) { +// SetIsRemote(v+v). +// }). +// Exec(ctx) +func (nc *NoteCreate) OnConflict(opts ...sql.ConflictOption) *NoteUpsertOne { + nc.conflict = opts + return &NoteUpsertOne{ + create: nc, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Note.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (nc *NoteCreate) OnConflictColumns(columns ...string) *NoteUpsertOne { + nc.conflict = append(nc.conflict, sql.ConflictColumns(columns...)) + return &NoteUpsertOne{ + create: nc, + } +} + +type ( + // NoteUpsertOne is the builder for "upsert"-ing + // one Note node. + NoteUpsertOne struct { + create *NoteCreate + } + + // NoteUpsert is the "OnConflict" setter. + NoteUpsert struct { + *sql.UpdateSet + } +) + +// SetIsRemote sets the "isRemote" field. +func (u *NoteUpsert) SetIsRemote(v bool) *NoteUpsert { + u.Set(note.FieldIsRemote, v) + return u +} + +// UpdateIsRemote sets the "isRemote" field to the value that was provided on create. +func (u *NoteUpsert) UpdateIsRemote() *NoteUpsert { + u.SetExcluded(note.FieldIsRemote) + return u +} + +// SetURI sets the "uri" field. +func (u *NoteUpsert) SetURI(v string) *NoteUpsert { + u.Set(note.FieldURI, v) + return u +} + +// UpdateURI sets the "uri" field to the value that was provided on create. +func (u *NoteUpsert) UpdateURI() *NoteUpsert { + u.SetExcluded(note.FieldURI) + return u +} + +// SetExtensions sets the "extensions" field. +func (u *NoteUpsert) SetExtensions(v lysand.Extensions) *NoteUpsert { + u.Set(note.FieldExtensions, v) + return u +} + +// UpdateExtensions sets the "extensions" field to the value that was provided on create. 
+func (u *NoteUpsert) UpdateExtensions() *NoteUpsert { + u.SetExcluded(note.FieldExtensions) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *NoteUpsert) SetUpdatedAt(v time.Time) *NoteUpsert { + u.Set(note.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *NoteUpsert) UpdateUpdatedAt() *NoteUpsert { + u.SetExcluded(note.FieldUpdatedAt) + return u +} + +// SetSubject sets the "subject" field. +func (u *NoteUpsert) SetSubject(v string) *NoteUpsert { + u.Set(note.FieldSubject, v) + return u +} + +// UpdateSubject sets the "subject" field to the value that was provided on create. +func (u *NoteUpsert) UpdateSubject() *NoteUpsert { + u.SetExcluded(note.FieldSubject) + return u +} + +// ClearSubject clears the value of the "subject" field. +func (u *NoteUpsert) ClearSubject() *NoteUpsert { + u.SetNull(note.FieldSubject) + return u +} + +// SetContent sets the "content" field. +func (u *NoteUpsert) SetContent(v string) *NoteUpsert { + u.Set(note.FieldContent, v) + return u +} + +// UpdateContent sets the "content" field to the value that was provided on create. +func (u *NoteUpsert) UpdateContent() *NoteUpsert { + u.SetExcluded(note.FieldContent) + return u +} + +// SetIsSensitive sets the "isSensitive" field. +func (u *NoteUpsert) SetIsSensitive(v bool) *NoteUpsert { + u.Set(note.FieldIsSensitive, v) + return u +} + +// UpdateIsSensitive sets the "isSensitive" field to the value that was provided on create. +func (u *NoteUpsert) UpdateIsSensitive() *NoteUpsert { + u.SetExcluded(note.FieldIsSensitive) + return u +} + +// SetVisibility sets the "visibility" field. +func (u *NoteUpsert) SetVisibility(v note.Visibility) *NoteUpsert { + u.Set(note.FieldVisibility, v) + return u +} + +// UpdateVisibility sets the "visibility" field to the value that was provided on create. 
+func (u *NoteUpsert) UpdateVisibility() *NoteUpsert { + u.SetExcluded(note.FieldVisibility) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. +// Using this option is equivalent to using: +// +// client.Note.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(note.FieldID) +// }), +// ). +// Exec(ctx) +func (u *NoteUpsertOne) UpdateNewValues() *NoteUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.ID(); exists { + s.SetIgnore(note.FieldID) + } + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(note.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Note.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *NoteUpsertOne) Ignore() *NoteUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *NoteUpsertOne) DoNothing() *NoteUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the NoteCreate.OnConflict +// documentation for more info. +func (u *NoteUpsertOne) Update(set func(*NoteUpsert)) *NoteUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&NoteUpsert{UpdateSet: update}) + })) + return u +} + +// SetIsRemote sets the "isRemote" field. 
+func (u *NoteUpsertOne) SetIsRemote(v bool) *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.SetIsRemote(v) + }) +} + +// UpdateIsRemote sets the "isRemote" field to the value that was provided on create. +func (u *NoteUpsertOne) UpdateIsRemote() *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.UpdateIsRemote() + }) +} + +// SetURI sets the "uri" field. +func (u *NoteUpsertOne) SetURI(v string) *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.SetURI(v) + }) +} + +// UpdateURI sets the "uri" field to the value that was provided on create. +func (u *NoteUpsertOne) UpdateURI() *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.UpdateURI() + }) +} + +// SetExtensions sets the "extensions" field. +func (u *NoteUpsertOne) SetExtensions(v lysand.Extensions) *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.SetExtensions(v) + }) +} + +// UpdateExtensions sets the "extensions" field to the value that was provided on create. +func (u *NoteUpsertOne) UpdateExtensions() *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.UpdateExtensions() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *NoteUpsertOne) SetUpdatedAt(v time.Time) *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *NoteUpsertOne) UpdateUpdatedAt() *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetSubject sets the "subject" field. +func (u *NoteUpsertOne) SetSubject(v string) *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.SetSubject(v) + }) +} + +// UpdateSubject sets the "subject" field to the value that was provided on create. +func (u *NoteUpsertOne) UpdateSubject() *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.UpdateSubject() + }) +} + +// ClearSubject clears the value of the "subject" field. 
+func (u *NoteUpsertOne) ClearSubject() *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.ClearSubject() + }) +} + +// SetContent sets the "content" field. +func (u *NoteUpsertOne) SetContent(v string) *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.SetContent(v) + }) +} + +// UpdateContent sets the "content" field to the value that was provided on create. +func (u *NoteUpsertOne) UpdateContent() *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.UpdateContent() + }) +} + +// SetIsSensitive sets the "isSensitive" field. +func (u *NoteUpsertOne) SetIsSensitive(v bool) *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.SetIsSensitive(v) + }) +} + +// UpdateIsSensitive sets the "isSensitive" field to the value that was provided on create. +func (u *NoteUpsertOne) UpdateIsSensitive() *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.UpdateIsSensitive() + }) +} + +// SetVisibility sets the "visibility" field. +func (u *NoteUpsertOne) SetVisibility(v note.Visibility) *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.SetVisibility(v) + }) +} + +// UpdateVisibility sets the "visibility" field to the value that was provided on create. +func (u *NoteUpsertOne) UpdateVisibility() *NoteUpsertOne { + return u.Update(func(s *NoteUpsert) { + s.UpdateVisibility() + }) +} + +// Exec executes the query. +func (u *NoteUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for NoteCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *NoteUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. 
+func (u *NoteUpsertOne) ID(ctx context.Context) (id uuid.UUID, err error) { + if u.create.driver.Dialect() == dialect.MySQL { + // In case of "ON CONFLICT", there is no way to get back non-numeric ID + // fields from the database since MySQL does not support the RETURNING clause. + return id, errors.New("ent: NoteUpsertOne.ID is not supported by MySQL driver. Use NoteUpsertOne.Exec instead") + } + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *NoteUpsertOne) IDX(ctx context.Context) uuid.UUID { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// NoteCreateBulk is the builder for creating many Note entities in bulk. +type NoteCreateBulk struct { + config + err error + builders []*NoteCreate + conflict []sql.ConflictOption +} + +// Save creates the Note entities in the database. +func (ncb *NoteCreateBulk) Save(ctx context.Context) ([]*Note, error) { + if ncb.err != nil { + return nil, ncb.err + } + specs := make([]*sqlgraph.CreateSpec, len(ncb.builders)) + nodes := make([]*Note, len(ncb.builders)) + mutators := make([]Mutator, len(ncb.builders)) + for i := range ncb.builders { + func(i int, root context.Context) { + builder := ncb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*NoteMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, ncb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = ncb.conflict + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, ncb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, ncb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (ncb *NoteCreateBulk) SaveX(ctx context.Context) []*Note { + v, err := ncb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (ncb *NoteCreateBulk) Exec(ctx context.Context) error { + _, err := ncb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ncb *NoteCreateBulk) ExecX(ctx context.Context) { + if err := ncb.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.Note.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.NoteUpsert) { +// SetIsRemote(v+v). +// }). +// Exec(ctx) +func (ncb *NoteCreateBulk) OnConflict(opts ...sql.ConflictOption) *NoteUpsertBulk { + ncb.conflict = opts + return &NoteUpsertBulk{ + create: ncb, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.Note.Create(). +// OnConflict(sql.ConflictColumns(columns...)). 
+// Exec(ctx) +func (ncb *NoteCreateBulk) OnConflictColumns(columns ...string) *NoteUpsertBulk { + ncb.conflict = append(ncb.conflict, sql.ConflictColumns(columns...)) + return &NoteUpsertBulk{ + create: ncb, + } +} + +// NoteUpsertBulk is the builder for "upsert"-ing +// a bulk of Note nodes. +type NoteUpsertBulk struct { + create *NoteCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.Note.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(note.FieldID) +// }), +// ). +// Exec(ctx) +func (u *NoteUpsertBulk) UpdateNewValues() *NoteUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.ID(); exists { + s.SetIgnore(note.FieldID) + } + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(note.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.Note.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *NoteUpsertBulk) Ignore() *NoteUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *NoteUpsertBulk) DoNothing() *NoteUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the NoteCreateBulk.OnConflict +// documentation for more info. 
+func (u *NoteUpsertBulk) Update(set func(*NoteUpsert)) *NoteUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&NoteUpsert{UpdateSet: update}) + })) + return u +} + +// SetIsRemote sets the "isRemote" field. +func (u *NoteUpsertBulk) SetIsRemote(v bool) *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.SetIsRemote(v) + }) +} + +// UpdateIsRemote sets the "isRemote" field to the value that was provided on create. +func (u *NoteUpsertBulk) UpdateIsRemote() *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.UpdateIsRemote() + }) +} + +// SetURI sets the "uri" field. +func (u *NoteUpsertBulk) SetURI(v string) *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.SetURI(v) + }) +} + +// UpdateURI sets the "uri" field to the value that was provided on create. +func (u *NoteUpsertBulk) UpdateURI() *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.UpdateURI() + }) +} + +// SetExtensions sets the "extensions" field. +func (u *NoteUpsertBulk) SetExtensions(v lysand.Extensions) *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.SetExtensions(v) + }) +} + +// UpdateExtensions sets the "extensions" field to the value that was provided on create. +func (u *NoteUpsertBulk) UpdateExtensions() *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.UpdateExtensions() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *NoteUpsertBulk) SetUpdatedAt(v time.Time) *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *NoteUpsertBulk) UpdateUpdatedAt() *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetSubject sets the "subject" field. 
+func (u *NoteUpsertBulk) SetSubject(v string) *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.SetSubject(v) + }) +} + +// UpdateSubject sets the "subject" field to the value that was provided on create. +func (u *NoteUpsertBulk) UpdateSubject() *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.UpdateSubject() + }) +} + +// ClearSubject clears the value of the "subject" field. +func (u *NoteUpsertBulk) ClearSubject() *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.ClearSubject() + }) +} + +// SetContent sets the "content" field. +func (u *NoteUpsertBulk) SetContent(v string) *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.SetContent(v) + }) +} + +// UpdateContent sets the "content" field to the value that was provided on create. +func (u *NoteUpsertBulk) UpdateContent() *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.UpdateContent() + }) +} + +// SetIsSensitive sets the "isSensitive" field. +func (u *NoteUpsertBulk) SetIsSensitive(v bool) *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.SetIsSensitive(v) + }) +} + +// UpdateIsSensitive sets the "isSensitive" field to the value that was provided on create. +func (u *NoteUpsertBulk) UpdateIsSensitive() *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.UpdateIsSensitive() + }) +} + +// SetVisibility sets the "visibility" field. +func (u *NoteUpsertBulk) SetVisibility(v note.Visibility) *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.SetVisibility(v) + }) +} + +// UpdateVisibility sets the "visibility" field to the value that was provided on create. +func (u *NoteUpsertBulk) UpdateVisibility() *NoteUpsertBulk { + return u.Update(func(s *NoteUpsert) { + s.UpdateVisibility() + }) +} + +// Exec executes the query. 
+func (u *NoteUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the NoteCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for NoteCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *NoteUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/note_delete.go b/ent/note_delete.go new file mode 100644 index 0000000..842d6b9 --- /dev/null +++ b/ent/note_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/lysand-org/versia-go/ent/note" + "github.com/lysand-org/versia-go/ent/predicate" +) + +// NoteDelete is the builder for deleting a Note entity. +type NoteDelete struct { + config + hooks []Hook + mutation *NoteMutation +} + +// Where appends a list predicates to the NoteDelete builder. +func (nd *NoteDelete) Where(ps ...predicate.Note) *NoteDelete { + nd.mutation.Where(ps...) + return nd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (nd *NoteDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, nd.sqlExec, nd.mutation, nd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (nd *NoteDelete) ExecX(ctx context.Context) int { + n, err := nd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (nd *NoteDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(note.Table, sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID)) + if ps := nd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, nd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + nd.mutation.done = true + return affected, err +} + +// NoteDeleteOne is the builder for deleting a single Note entity. +type NoteDeleteOne struct { + nd *NoteDelete +} + +// Where appends a list predicates to the NoteDelete builder. +func (ndo *NoteDeleteOne) Where(ps ...predicate.Note) *NoteDeleteOne { + ndo.nd.mutation.Where(ps...) + return ndo +} + +// Exec executes the deletion query. +func (ndo *NoteDeleteOne) Exec(ctx context.Context) error { + n, err := ndo.nd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{note.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (ndo *NoteDeleteOne) ExecX(ctx context.Context) { + if err := ndo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/note_query.go b/ent/note_query.go new file mode 100644 index 0000000..f9030ea --- /dev/null +++ b/ent/note_query.go @@ -0,0 +1,794 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/attachment" + "github.com/lysand-org/versia-go/ent/note" + "github.com/lysand-org/versia-go/ent/predicate" + "github.com/lysand-org/versia-go/ent/user" +) + +// NoteQuery is the builder for querying Note entities. +type NoteQuery struct { + config + ctx *QueryContext + order []note.OrderOption + inters []Interceptor + predicates []predicate.Note + withAuthor *UserQuery + withMentions *UserQuery + withAttachments *AttachmentQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the NoteQuery builder. +func (nq *NoteQuery) Where(ps ...predicate.Note) *NoteQuery { + nq.predicates = append(nq.predicates, ps...) + return nq +} + +// Limit the number of records to be returned by this query. +func (nq *NoteQuery) Limit(limit int) *NoteQuery { + nq.ctx.Limit = &limit + return nq +} + +// Offset to start from. +func (nq *NoteQuery) Offset(offset int) *NoteQuery { + nq.ctx.Offset = &offset + return nq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (nq *NoteQuery) Unique(unique bool) *NoteQuery { + nq.ctx.Unique = &unique + return nq +} + +// Order specifies how the records should be ordered. +func (nq *NoteQuery) Order(o ...note.OrderOption) *NoteQuery { + nq.order = append(nq.order, o...) + return nq +} + +// QueryAuthor chains the current query on the "author" edge. 
+func (nq *NoteQuery) QueryAuthor() *UserQuery { + query := (&UserClient{config: nq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := nq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := nq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(note.Table, note.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, note.AuthorTable, note.AuthorColumn), + ) + fromU = sqlgraph.SetNeighbors(nq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryMentions chains the current query on the "mentions" edge. +func (nq *NoteQuery) QueryMentions() *UserQuery { + query := (&UserClient{config: nq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := nq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := nq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(note.Table, note.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2M, false, note.MentionsTable, note.MentionsPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(nq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAttachments chains the current query on the "attachments" edge. 
+func (nq *NoteQuery) QueryAttachments() *AttachmentQuery { + query := (&AttachmentClient{config: nq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := nq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := nq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(note.Table, note.FieldID, selector), + sqlgraph.To(attachment.Table, attachment.FieldID), + sqlgraph.Edge(sqlgraph.O2M, false, note.AttachmentsTable, note.AttachmentsColumn), + ) + fromU = sqlgraph.SetNeighbors(nq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first Note entity from the query. +// Returns a *NotFoundError when no Note was found. +func (nq *NoteQuery) First(ctx context.Context) (*Note, error) { + nodes, err := nq.Limit(1).All(setContextOp(ctx, nq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{note.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (nq *NoteQuery) FirstX(ctx context.Context) *Note { + node, err := nq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first Note ID from the query. +// Returns a *NotFoundError when no Note ID was found. +func (nq *NoteQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = nq.Limit(1).IDs(setContextOp(ctx, nq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{note.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (nq *NoteQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := nq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single Note entity found by the query, ensuring it only returns one. 
+// Returns a *NotSingularError when more than one Note entity is found. +// Returns a *NotFoundError when no Note entities are found. +func (nq *NoteQuery) Only(ctx context.Context) (*Note, error) { + nodes, err := nq.Limit(2).All(setContextOp(ctx, nq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{note.Label} + default: + return nil, &NotSingularError{note.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (nq *NoteQuery) OnlyX(ctx context.Context) *Note { + node, err := nq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only Note ID in the query. +// Returns a *NotSingularError when more than one Note ID is found. +// Returns a *NotFoundError when no entities are found. +func (nq *NoteQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = nq.Limit(2).IDs(setContextOp(ctx, nq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{note.Label} + default: + err = &NotSingularError{note.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (nq *NoteQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := nq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Notes. +func (nq *NoteQuery) All(ctx context.Context) ([]*Note, error) { + ctx = setContextOp(ctx, nq.ctx, "All") + if err := nq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*Note, *NoteQuery]() + return withInterceptors[[]*Note](ctx, nq, qr, nq.inters) +} + +// AllX is like All, but panics if an error occurs. 
+func (nq *NoteQuery) AllX(ctx context.Context) []*Note { + nodes, err := nq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of Note IDs. +func (nq *NoteQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if nq.ctx.Unique == nil && nq.path != nil { + nq.Unique(true) + } + ctx = setContextOp(ctx, nq.ctx, "IDs") + if err = nq.Select(note.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (nq *NoteQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := nq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (nq *NoteQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, nq.ctx, "Count") + if err := nq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, nq, querierCount[*NoteQuery](), nq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (nq *NoteQuery) CountX(ctx context.Context) int { + count, err := nq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (nq *NoteQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, nq.ctx, "Exist") + switch _, err := nq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (nq *NoteQuery) ExistX(ctx context.Context) bool { + exist, err := nq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the NoteQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. 
+func (nq *NoteQuery) Clone() *NoteQuery { + if nq == nil { + return nil + } + return &NoteQuery{ + config: nq.config, + ctx: nq.ctx.Clone(), + order: append([]note.OrderOption{}, nq.order...), + inters: append([]Interceptor{}, nq.inters...), + predicates: append([]predicate.Note{}, nq.predicates...), + withAuthor: nq.withAuthor.Clone(), + withMentions: nq.withMentions.Clone(), + withAttachments: nq.withAttachments.Clone(), + // clone intermediate query. + sql: nq.sql.Clone(), + path: nq.path, + } +} + +// WithAuthor tells the query-builder to eager-load the nodes that are connected to +// the "author" edge. The optional arguments are used to configure the query builder of the edge. +func (nq *NoteQuery) WithAuthor(opts ...func(*UserQuery)) *NoteQuery { + query := (&UserClient{config: nq.config}).Query() + for _, opt := range opts { + opt(query) + } + nq.withAuthor = query + return nq +} + +// WithMentions tells the query-builder to eager-load the nodes that are connected to +// the "mentions" edge. The optional arguments are used to configure the query builder of the edge. +func (nq *NoteQuery) WithMentions(opts ...func(*UserQuery)) *NoteQuery { + query := (&UserClient{config: nq.config}).Query() + for _, opt := range opts { + opt(query) + } + nq.withMentions = query + return nq +} + +// WithAttachments tells the query-builder to eager-load the nodes that are connected to +// the "attachments" edge. The optional arguments are used to configure the query builder of the edge. +func (nq *NoteQuery) WithAttachments(opts ...func(*AttachmentQuery)) *NoteQuery { + query := (&AttachmentClient{config: nq.config}).Query() + for _, opt := range opts { + opt(query) + } + nq.withAttachments = query + return nq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. 
+// +// Example: +// +// var v []struct { +// IsRemote bool `json:"isRemote,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.Note.Query(). +// GroupBy(note.FieldIsRemote). +// Aggregate(ent.Count()). +// Scan(ctx, &v) +func (nq *NoteQuery) GroupBy(field string, fields ...string) *NoteGroupBy { + nq.ctx.Fields = append([]string{field}, fields...) + grbuild := &NoteGroupBy{build: nq} + grbuild.flds = &nq.ctx.Fields + grbuild.label = note.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// IsRemote bool `json:"isRemote,omitempty"` +// } +// +// client.Note.Query(). +// Select(note.FieldIsRemote). +// Scan(ctx, &v) +func (nq *NoteQuery) Select(fields ...string) *NoteSelect { + nq.ctx.Fields = append(nq.ctx.Fields, fields...) + sbuild := &NoteSelect{NoteQuery: nq} + sbuild.label = note.Label + sbuild.flds, sbuild.scan = &nq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a NoteSelect configured with the given aggregations. +func (nq *NoteQuery) Aggregate(fns ...AggregateFunc) *NoteSelect { + return nq.Select().Aggregate(fns...) 
+} + +func (nq *NoteQuery) prepareQuery(ctx context.Context) error { + for _, inter := range nq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, nq); err != nil { + return err + } + } + } + for _, f := range nq.ctx.Fields { + if !note.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if nq.path != nil { + prev, err := nq.path(ctx) + if err != nil { + return err + } + nq.sql = prev + } + return nil +} + +func (nq *NoteQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*Note, error) { + var ( + nodes = []*Note{} + withFKs = nq.withFKs + _spec = nq.querySpec() + loadedTypes = [3]bool{ + nq.withAuthor != nil, + nq.withMentions != nil, + nq.withAttachments != nil, + } + ) + if nq.withAuthor != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, note.ForeignKeys...) 
+ } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*Note).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &Note{config: nq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, nq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := nq.withAuthor; query != nil { + if err := nq.loadAuthor(ctx, query, nodes, nil, + func(n *Note, e *User) { n.Edges.Author = e }); err != nil { + return nil, err + } + } + if query := nq.withMentions; query != nil { + if err := nq.loadMentions(ctx, query, nodes, + func(n *Note) { n.Edges.Mentions = []*User{} }, + func(n *Note, e *User) { n.Edges.Mentions = append(n.Edges.Mentions, e) }); err != nil { + return nil, err + } + } + if query := nq.withAttachments; query != nil { + if err := nq.loadAttachments(ctx, query, nodes, + func(n *Note) { n.Edges.Attachments = []*Attachment{} }, + func(n *Note, e *Attachment) { n.Edges.Attachments = append(n.Edges.Attachments, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (nq *NoteQuery) loadAuthor(ctx context.Context, query *UserQuery, nodes []*Note, init func(*Note), assign func(*Note, *User)) error { + ids := make([]uuid.UUID, 0, len(nodes)) + nodeids := make(map[uuid.UUID][]*Note) + for i := range nodes { + if nodes[i].note_author == nil { + continue + } + fk := *nodes[i].note_author + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(user.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "note_author" returned %v`, n.ID) 
+ } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (nq *NoteQuery) loadMentions(ctx context.Context, query *UserQuery, nodes []*Note, init func(*Note), assign func(*Note, *User)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*Note) + nids := make(map[uuid.UUID]map[*Note]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(note.MentionsTable) + s.Join(joinT).On(s.C(user.FieldID), joinT.C(note.MentionsPrimaryKey[1])) + s.Where(sql.InValues(joinT.C(note.MentionsPrimaryKey[0]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(note.MentionsPrimaryKey[0])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*Note]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*User](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "mentions" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} +func (nq *NoteQuery) loadAttachments(ctx 
context.Context, query *AttachmentQuery, nodes []*Note, init func(*Note), assign func(*Note, *Attachment)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*Note) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Attachment(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(note.AttachmentsColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.note_attachments + if fk == nil { + return fmt.Errorf(`foreign-key "note_attachments" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "note_attachments" returned %v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} + +func (nq *NoteQuery) sqlCount(ctx context.Context) (int, error) { + _spec := nq.querySpec() + _spec.Node.Columns = nq.ctx.Fields + if len(nq.ctx.Fields) > 0 { + _spec.Unique = nq.ctx.Unique != nil && *nq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, nq.driver, _spec) +} + +func (nq *NoteQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(note.Table, note.Columns, sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID)) + _spec.From = nq.sql + if unique := nq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if nq.path != nil { + _spec.Unique = true + } + if fields := nq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, note.FieldID) + for i := range fields { + if fields[i] != note.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := nq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := nq.ctx.Limit; limit != nil { + _spec.Limit = 
*limit + } + if offset := nq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := nq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (nq *NoteQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(nq.driver.Dialect()) + t1 := builder.Table(note.Table) + columns := nq.ctx.Fields + if len(columns) == 0 { + columns = note.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if nq.sql != nil { + selector = nq.sql + selector.Select(selector.Columns(columns...)...) + } + if nq.ctx.Unique != nil && *nq.ctx.Unique { + selector.Distinct() + } + for _, p := range nq.predicates { + p(selector) + } + for _, p := range nq.order { + p(selector) + } + if offset := nq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := nq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// NoteGroupBy is the group-by builder for Note entities. +type NoteGroupBy struct { + selector + build *NoteQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (ngb *NoteGroupBy) Aggregate(fns ...AggregateFunc) *NoteGroupBy { + ngb.fns = append(ngb.fns, fns...) + return ngb +} + +// Scan applies the selector query and scans the result into the given value. 
+func (ngb *NoteGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ngb.build.ctx, "GroupBy") + if err := ngb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*NoteQuery, *NoteGroupBy](ctx, ngb.build, ngb, ngb.build.inters, v) +} + +func (ngb *NoteGroupBy) sqlScan(ctx context.Context, root *NoteQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(ngb.fns)) + for _, fn := range ngb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*ngb.flds)+len(ngb.fns)) + for _, f := range *ngb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*ngb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ngb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// NoteSelect is the builder for selecting fields of Note entities. +type NoteSelect struct { + *NoteQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. +func (ns *NoteSelect) Aggregate(fns ...AggregateFunc) *NoteSelect { + ns.fns = append(ns.fns, fns...) + return ns +} + +// Scan applies the selector query and scans the result into the given value. 
+func (ns *NoteSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ns.ctx, "Select") + if err := ns.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*NoteQuery, *NoteSelect](ctx, ns.NoteQuery, ns, ns.inters, v) +} + +func (ns *NoteSelect) sqlScan(ctx context.Context, root *NoteQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(ns.fns)) + for _, fn := range ns.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*ns.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ns.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/ent/note_update.go b/ent/note_update.go new file mode 100644 index 0000000..de14122 --- /dev/null +++ b/ent/note_update.go @@ -0,0 +1,924 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/attachment" + "github.com/lysand-org/versia-go/ent/note" + "github.com/lysand-org/versia-go/ent/predicate" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// NoteUpdate is the builder for updating Note entities. +type NoteUpdate struct { + config + hooks []Hook + mutation *NoteMutation +} + +// Where appends a list predicates to the NoteUpdate builder. +func (nu *NoteUpdate) Where(ps ...predicate.Note) *NoteUpdate { + nu.mutation.Where(ps...) + return nu +} + +// SetIsRemote sets the "isRemote" field. 
+func (nu *NoteUpdate) SetIsRemote(b bool) *NoteUpdate { + nu.mutation.SetIsRemote(b) + return nu +} + +// SetNillableIsRemote sets the "isRemote" field if the given value is not nil. +func (nu *NoteUpdate) SetNillableIsRemote(b *bool) *NoteUpdate { + if b != nil { + nu.SetIsRemote(*b) + } + return nu +} + +// SetURI sets the "uri" field. +func (nu *NoteUpdate) SetURI(s string) *NoteUpdate { + nu.mutation.SetURI(s) + return nu +} + +// SetNillableURI sets the "uri" field if the given value is not nil. +func (nu *NoteUpdate) SetNillableURI(s *string) *NoteUpdate { + if s != nil { + nu.SetURI(*s) + } + return nu +} + +// SetExtensions sets the "extensions" field. +func (nu *NoteUpdate) SetExtensions(l lysand.Extensions) *NoteUpdate { + nu.mutation.SetExtensions(l) + return nu +} + +// SetUpdatedAt sets the "updated_at" field. +func (nu *NoteUpdate) SetUpdatedAt(t time.Time) *NoteUpdate { + nu.mutation.SetUpdatedAt(t) + return nu +} + +// SetSubject sets the "subject" field. +func (nu *NoteUpdate) SetSubject(s string) *NoteUpdate { + nu.mutation.SetSubject(s) + return nu +} + +// SetNillableSubject sets the "subject" field if the given value is not nil. +func (nu *NoteUpdate) SetNillableSubject(s *string) *NoteUpdate { + if s != nil { + nu.SetSubject(*s) + } + return nu +} + +// ClearSubject clears the value of the "subject" field. +func (nu *NoteUpdate) ClearSubject() *NoteUpdate { + nu.mutation.ClearSubject() + return nu +} + +// SetContent sets the "content" field. +func (nu *NoteUpdate) SetContent(s string) *NoteUpdate { + nu.mutation.SetContent(s) + return nu +} + +// SetNillableContent sets the "content" field if the given value is not nil. +func (nu *NoteUpdate) SetNillableContent(s *string) *NoteUpdate { + if s != nil { + nu.SetContent(*s) + } + return nu +} + +// SetIsSensitive sets the "isSensitive" field. 
+func (nu *NoteUpdate) SetIsSensitive(b bool) *NoteUpdate { + nu.mutation.SetIsSensitive(b) + return nu +} + +// SetNillableIsSensitive sets the "isSensitive" field if the given value is not nil. +func (nu *NoteUpdate) SetNillableIsSensitive(b *bool) *NoteUpdate { + if b != nil { + nu.SetIsSensitive(*b) + } + return nu +} + +// SetVisibility sets the "visibility" field. +func (nu *NoteUpdate) SetVisibility(n note.Visibility) *NoteUpdate { + nu.mutation.SetVisibility(n) + return nu +} + +// SetNillableVisibility sets the "visibility" field if the given value is not nil. +func (nu *NoteUpdate) SetNillableVisibility(n *note.Visibility) *NoteUpdate { + if n != nil { + nu.SetVisibility(*n) + } + return nu +} + +// SetAuthorID sets the "author" edge to the User entity by ID. +func (nu *NoteUpdate) SetAuthorID(id uuid.UUID) *NoteUpdate { + nu.mutation.SetAuthorID(id) + return nu +} + +// SetAuthor sets the "author" edge to the User entity. +func (nu *NoteUpdate) SetAuthor(u *User) *NoteUpdate { + return nu.SetAuthorID(u.ID) +} + +// AddMentionIDs adds the "mentions" edge to the User entity by IDs. +func (nu *NoteUpdate) AddMentionIDs(ids ...uuid.UUID) *NoteUpdate { + nu.mutation.AddMentionIDs(ids...) + return nu +} + +// AddMentions adds the "mentions" edges to the User entity. +func (nu *NoteUpdate) AddMentions(u ...*User) *NoteUpdate { + ids := make([]uuid.UUID, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return nu.AddMentionIDs(ids...) +} + +// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs. +func (nu *NoteUpdate) AddAttachmentIDs(ids ...uuid.UUID) *NoteUpdate { + nu.mutation.AddAttachmentIDs(ids...) + return nu +} + +// AddAttachments adds the "attachments" edges to the Attachment entity. +func (nu *NoteUpdate) AddAttachments(a ...*Attachment) *NoteUpdate { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return nu.AddAttachmentIDs(ids...) 
+} + +// Mutation returns the NoteMutation object of the builder. +func (nu *NoteUpdate) Mutation() *NoteMutation { + return nu.mutation +} + +// ClearAuthor clears the "author" edge to the User entity. +func (nu *NoteUpdate) ClearAuthor() *NoteUpdate { + nu.mutation.ClearAuthor() + return nu +} + +// ClearMentions clears all "mentions" edges to the User entity. +func (nu *NoteUpdate) ClearMentions() *NoteUpdate { + nu.mutation.ClearMentions() + return nu +} + +// RemoveMentionIDs removes the "mentions" edge to User entities by IDs. +func (nu *NoteUpdate) RemoveMentionIDs(ids ...uuid.UUID) *NoteUpdate { + nu.mutation.RemoveMentionIDs(ids...) + return nu +} + +// RemoveMentions removes "mentions" edges to User entities. +func (nu *NoteUpdate) RemoveMentions(u ...*User) *NoteUpdate { + ids := make([]uuid.UUID, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return nu.RemoveMentionIDs(ids...) +} + +// ClearAttachments clears all "attachments" edges to the Attachment entity. +func (nu *NoteUpdate) ClearAttachments() *NoteUpdate { + nu.mutation.ClearAttachments() + return nu +} + +// RemoveAttachmentIDs removes the "attachments" edge to Attachment entities by IDs. +func (nu *NoteUpdate) RemoveAttachmentIDs(ids ...uuid.UUID) *NoteUpdate { + nu.mutation.RemoveAttachmentIDs(ids...) + return nu +} + +// RemoveAttachments removes "attachments" edges to Attachment entities. +func (nu *NoteUpdate) RemoveAttachments(a ...*Attachment) *NoteUpdate { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return nu.RemoveAttachmentIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (nu *NoteUpdate) Save(ctx context.Context) (int, error) { + nu.defaults() + return withHooks(ctx, nu.sqlSave, nu.mutation, nu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
+func (nu *NoteUpdate) SaveX(ctx context.Context) int { + affected, err := nu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (nu *NoteUpdate) Exec(ctx context.Context) error { + _, err := nu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (nu *NoteUpdate) ExecX(ctx context.Context) { + if err := nu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (nu *NoteUpdate) defaults() { + if _, ok := nu.mutation.UpdatedAt(); !ok { + v := note.UpdateDefaultUpdatedAt() + nu.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (nu *NoteUpdate) check() error { + if v, ok := nu.mutation.URI(); ok { + if err := note.URIValidator(v); err != nil { + return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "Note.uri": %w`, err)} + } + } + if v, ok := nu.mutation.Subject(); ok { + if err := note.SubjectValidator(v); err != nil { + return &ValidationError{Name: "subject", err: fmt.Errorf(`ent: validator failed for field "Note.subject": %w`, err)} + } + } + if v, ok := nu.mutation.Visibility(); ok { + if err := note.VisibilityValidator(v); err != nil { + return &ValidationError{Name: "visibility", err: fmt.Errorf(`ent: validator failed for field "Note.visibility": %w`, err)} + } + } + if _, ok := nu.mutation.AuthorID(); nu.mutation.AuthorCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Note.author"`) + } + return nil +} + +func (nu *NoteUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := nu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(note.Table, note.Columns, sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID)) + if ps := nu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + 
if value, ok := nu.mutation.IsRemote(); ok { + _spec.SetField(note.FieldIsRemote, field.TypeBool, value) + } + if value, ok := nu.mutation.URI(); ok { + _spec.SetField(note.FieldURI, field.TypeString, value) + } + if value, ok := nu.mutation.Extensions(); ok { + _spec.SetField(note.FieldExtensions, field.TypeJSON, value) + } + if value, ok := nu.mutation.UpdatedAt(); ok { + _spec.SetField(note.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := nu.mutation.Subject(); ok { + _spec.SetField(note.FieldSubject, field.TypeString, value) + } + if nu.mutation.SubjectCleared() { + _spec.ClearField(note.FieldSubject, field.TypeString) + } + if value, ok := nu.mutation.Content(); ok { + _spec.SetField(note.FieldContent, field.TypeString, value) + } + if value, ok := nu.mutation.IsSensitive(); ok { + _spec.SetField(note.FieldIsSensitive, field.TypeBool, value) + } + if value, ok := nu.mutation.Visibility(); ok { + _spec.SetField(note.FieldVisibility, field.TypeEnum, value) + } + if nu.mutation.AuthorCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: note.AuthorTable, + Columns: []string{note.AuthorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nu.mutation.AuthorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: note.AuthorTable, + Columns: []string{note.AuthorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if nu.mutation.MentionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: note.MentionsTable, + Columns: note.MentionsPrimaryKey, + Bidi: false, + Target: 
&sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nu.mutation.RemovedMentionsIDs(); len(nodes) > 0 && !nu.mutation.MentionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: note.MentionsTable, + Columns: note.MentionsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nu.mutation.MentionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: note.MentionsTable, + Columns: note.MentionsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if nu.mutation.AttachmentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: note.AttachmentsTable, + Columns: []string{note.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nu.mutation.RemovedAttachmentsIDs(); len(nodes) > 0 && !nu.mutation.AttachmentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: note.AttachmentsTable, + Columns: []string{note.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := 
nu.mutation.AttachmentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: note.AttachmentsTable, + Columns: []string{note.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, nu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{note.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + nu.mutation.done = true + return n, nil +} + +// NoteUpdateOne is the builder for updating a single Note entity. +type NoteUpdateOne struct { + config + fields []string + hooks []Hook + mutation *NoteMutation +} + +// SetIsRemote sets the "isRemote" field. +func (nuo *NoteUpdateOne) SetIsRemote(b bool) *NoteUpdateOne { + nuo.mutation.SetIsRemote(b) + return nuo +} + +// SetNillableIsRemote sets the "isRemote" field if the given value is not nil. +func (nuo *NoteUpdateOne) SetNillableIsRemote(b *bool) *NoteUpdateOne { + if b != nil { + nuo.SetIsRemote(*b) + } + return nuo +} + +// SetURI sets the "uri" field. +func (nuo *NoteUpdateOne) SetURI(s string) *NoteUpdateOne { + nuo.mutation.SetURI(s) + return nuo +} + +// SetNillableURI sets the "uri" field if the given value is not nil. +func (nuo *NoteUpdateOne) SetNillableURI(s *string) *NoteUpdateOne { + if s != nil { + nuo.SetURI(*s) + } + return nuo +} + +// SetExtensions sets the "extensions" field. +func (nuo *NoteUpdateOne) SetExtensions(l lysand.Extensions) *NoteUpdateOne { + nuo.mutation.SetExtensions(l) + return nuo +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (nuo *NoteUpdateOne) SetUpdatedAt(t time.Time) *NoteUpdateOne { + nuo.mutation.SetUpdatedAt(t) + return nuo +} + +// SetSubject sets the "subject" field. +func (nuo *NoteUpdateOne) SetSubject(s string) *NoteUpdateOne { + nuo.mutation.SetSubject(s) + return nuo +} + +// SetNillableSubject sets the "subject" field if the given value is not nil. +func (nuo *NoteUpdateOne) SetNillableSubject(s *string) *NoteUpdateOne { + if s != nil { + nuo.SetSubject(*s) + } + return nuo +} + +// ClearSubject clears the value of the "subject" field. +func (nuo *NoteUpdateOne) ClearSubject() *NoteUpdateOne { + nuo.mutation.ClearSubject() + return nuo +} + +// SetContent sets the "content" field. +func (nuo *NoteUpdateOne) SetContent(s string) *NoteUpdateOne { + nuo.mutation.SetContent(s) + return nuo +} + +// SetNillableContent sets the "content" field if the given value is not nil. +func (nuo *NoteUpdateOne) SetNillableContent(s *string) *NoteUpdateOne { + if s != nil { + nuo.SetContent(*s) + } + return nuo +} + +// SetIsSensitive sets the "isSensitive" field. +func (nuo *NoteUpdateOne) SetIsSensitive(b bool) *NoteUpdateOne { + nuo.mutation.SetIsSensitive(b) + return nuo +} + +// SetNillableIsSensitive sets the "isSensitive" field if the given value is not nil. +func (nuo *NoteUpdateOne) SetNillableIsSensitive(b *bool) *NoteUpdateOne { + if b != nil { + nuo.SetIsSensitive(*b) + } + return nuo +} + +// SetVisibility sets the "visibility" field. +func (nuo *NoteUpdateOne) SetVisibility(n note.Visibility) *NoteUpdateOne { + nuo.mutation.SetVisibility(n) + return nuo +} + +// SetNillableVisibility sets the "visibility" field if the given value is not nil. +func (nuo *NoteUpdateOne) SetNillableVisibility(n *note.Visibility) *NoteUpdateOne { + if n != nil { + nuo.SetVisibility(*n) + } + return nuo +} + +// SetAuthorID sets the "author" edge to the User entity by ID. 
+func (nuo *NoteUpdateOne) SetAuthorID(id uuid.UUID) *NoteUpdateOne { + nuo.mutation.SetAuthorID(id) + return nuo +} + +// SetAuthor sets the "author" edge to the User entity. +func (nuo *NoteUpdateOne) SetAuthor(u *User) *NoteUpdateOne { + return nuo.SetAuthorID(u.ID) +} + +// AddMentionIDs adds the "mentions" edge to the User entity by IDs. +func (nuo *NoteUpdateOne) AddMentionIDs(ids ...uuid.UUID) *NoteUpdateOne { + nuo.mutation.AddMentionIDs(ids...) + return nuo +} + +// AddMentions adds the "mentions" edges to the User entity. +func (nuo *NoteUpdateOne) AddMentions(u ...*User) *NoteUpdateOne { + ids := make([]uuid.UUID, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return nuo.AddMentionIDs(ids...) +} + +// AddAttachmentIDs adds the "attachments" edge to the Attachment entity by IDs. +func (nuo *NoteUpdateOne) AddAttachmentIDs(ids ...uuid.UUID) *NoteUpdateOne { + nuo.mutation.AddAttachmentIDs(ids...) + return nuo +} + +// AddAttachments adds the "attachments" edges to the Attachment entity. +func (nuo *NoteUpdateOne) AddAttachments(a ...*Attachment) *NoteUpdateOne { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return nuo.AddAttachmentIDs(ids...) +} + +// Mutation returns the NoteMutation object of the builder. +func (nuo *NoteUpdateOne) Mutation() *NoteMutation { + return nuo.mutation +} + +// ClearAuthor clears the "author" edge to the User entity. +func (nuo *NoteUpdateOne) ClearAuthor() *NoteUpdateOne { + nuo.mutation.ClearAuthor() + return nuo +} + +// ClearMentions clears all "mentions" edges to the User entity. +func (nuo *NoteUpdateOne) ClearMentions() *NoteUpdateOne { + nuo.mutation.ClearMentions() + return nuo +} + +// RemoveMentionIDs removes the "mentions" edge to User entities by IDs. +func (nuo *NoteUpdateOne) RemoveMentionIDs(ids ...uuid.UUID) *NoteUpdateOne { + nuo.mutation.RemoveMentionIDs(ids...) + return nuo +} + +// RemoveMentions removes "mentions" edges to User entities. 
+func (nuo *NoteUpdateOne) RemoveMentions(u ...*User) *NoteUpdateOne { + ids := make([]uuid.UUID, len(u)) + for i := range u { + ids[i] = u[i].ID + } + return nuo.RemoveMentionIDs(ids...) +} + +// ClearAttachments clears all "attachments" edges to the Attachment entity. +func (nuo *NoteUpdateOne) ClearAttachments() *NoteUpdateOne { + nuo.mutation.ClearAttachments() + return nuo +} + +// RemoveAttachmentIDs removes the "attachments" edge to Attachment entities by IDs. +func (nuo *NoteUpdateOne) RemoveAttachmentIDs(ids ...uuid.UUID) *NoteUpdateOne { + nuo.mutation.RemoveAttachmentIDs(ids...) + return nuo +} + +// RemoveAttachments removes "attachments" edges to Attachment entities. +func (nuo *NoteUpdateOne) RemoveAttachments(a ...*Attachment) *NoteUpdateOne { + ids := make([]uuid.UUID, len(a)) + for i := range a { + ids[i] = a[i].ID + } + return nuo.RemoveAttachmentIDs(ids...) +} + +// Where appends a list predicates to the NoteUpdate builder. +func (nuo *NoteUpdateOne) Where(ps ...predicate.Note) *NoteUpdateOne { + nuo.mutation.Where(ps...) + return nuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (nuo *NoteUpdateOne) Select(field string, fields ...string) *NoteUpdateOne { + nuo.fields = append([]string{field}, fields...) + return nuo +} + +// Save executes the query and returns the updated Note entity. +func (nuo *NoteUpdateOne) Save(ctx context.Context) (*Note, error) { + nuo.defaults() + return withHooks(ctx, nuo.sqlSave, nuo.mutation, nuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (nuo *NoteUpdateOne) SaveX(ctx context.Context) *Note { + node, err := nuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (nuo *NoteUpdateOne) Exec(ctx context.Context) error { + _, err := nuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (nuo *NoteUpdateOne) ExecX(ctx context.Context) { + if err := nuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (nuo *NoteUpdateOne) defaults() { + if _, ok := nuo.mutation.UpdatedAt(); !ok { + v := note.UpdateDefaultUpdatedAt() + nuo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (nuo *NoteUpdateOne) check() error { + if v, ok := nuo.mutation.URI(); ok { + if err := note.URIValidator(v); err != nil { + return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "Note.uri": %w`, err)} + } + } + if v, ok := nuo.mutation.Subject(); ok { + if err := note.SubjectValidator(v); err != nil { + return &ValidationError{Name: "subject", err: fmt.Errorf(`ent: validator failed for field "Note.subject": %w`, err)} + } + } + if v, ok := nuo.mutation.Visibility(); ok { + if err := note.VisibilityValidator(v); err != nil { + return &ValidationError{Name: "visibility", err: fmt.Errorf(`ent: validator failed for field "Note.visibility": %w`, err)} + } + } + if _, ok := nuo.mutation.AuthorID(); nuo.mutation.AuthorCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "Note.author"`) + } + return nil +} + +func (nuo *NoteUpdateOne) sqlSave(ctx context.Context) (_node *Note, err error) { + if err := nuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(note.Table, note.Columns, sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID)) + id, ok := nuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "Note.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := nuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, note.FieldID) + for _, f := range fields { + if !note.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: 
invalid field %q for query", f)} + } + if f != note.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := nuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := nuo.mutation.IsRemote(); ok { + _spec.SetField(note.FieldIsRemote, field.TypeBool, value) + } + if value, ok := nuo.mutation.URI(); ok { + _spec.SetField(note.FieldURI, field.TypeString, value) + } + if value, ok := nuo.mutation.Extensions(); ok { + _spec.SetField(note.FieldExtensions, field.TypeJSON, value) + } + if value, ok := nuo.mutation.UpdatedAt(); ok { + _spec.SetField(note.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := nuo.mutation.Subject(); ok { + _spec.SetField(note.FieldSubject, field.TypeString, value) + } + if nuo.mutation.SubjectCleared() { + _spec.ClearField(note.FieldSubject, field.TypeString) + } + if value, ok := nuo.mutation.Content(); ok { + _spec.SetField(note.FieldContent, field.TypeString, value) + } + if value, ok := nuo.mutation.IsSensitive(); ok { + _spec.SetField(note.FieldIsSensitive, field.TypeBool, value) + } + if value, ok := nuo.mutation.Visibility(); ok { + _spec.SetField(note.FieldVisibility, field.TypeEnum, value) + } + if nuo.mutation.AuthorCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: note.AuthorTable, + Columns: []string{note.AuthorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nuo.mutation.AuthorIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: note.AuthorTable, + Columns: []string{note.AuthorColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = 
append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if nuo.mutation.MentionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: note.MentionsTable, + Columns: note.MentionsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nuo.mutation.RemovedMentionsIDs(); len(nodes) > 0 && !nuo.mutation.MentionsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: note.MentionsTable, + Columns: note.MentionsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nuo.mutation.MentionsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: false, + Table: note.MentionsTable, + Columns: note.MentionsPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if nuo.mutation.AttachmentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: note.AttachmentsTable, + Columns: []string{note.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nuo.mutation.RemovedAttachmentsIDs(); len(nodes) > 0 && !nuo.mutation.AttachmentsCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: note.AttachmentsTable, + Columns: 
[]string{note.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := nuo.mutation.AttachmentsIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: false, + Table: note.AttachmentsTable, + Columns: []string{note.AttachmentsColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(attachment.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &Note{config: nuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, nuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{note.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + nuo.mutation.done = true + return _node, nil +} diff --git a/ent/predicate/predicate.go b/ent/predicate/predicate.go new file mode 100644 index 0000000..76a32b2 --- /dev/null +++ b/ent/predicate/predicate.go @@ -0,0 +1,25 @@ +// Code generated by ent, DO NOT EDIT. + +package predicate + +import ( + "entgo.io/ent/dialect/sql" +) + +// Attachment is the predicate function for attachment builders. +type Attachment func(*sql.Selector) + +// Follow is the predicate function for follow builders. +type Follow func(*sql.Selector) + +// Image is the predicate function for image builders. +type Image func(*sql.Selector) + +// Note is the predicate function for note builders. +type Note func(*sql.Selector) + +// ServerMetadata is the predicate function for servermetadata builders. 
+type ServerMetadata func(*sql.Selector) + +// User is the predicate function for user builders. +type User func(*sql.Selector) diff --git a/ent/runtime.go b/ent/runtime.go new file mode 100644 index 0000000..f2ce2c8 --- /dev/null +++ b/ent/runtime.go @@ -0,0 +1,231 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "time" + + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/attachment" + "github.com/lysand-org/versia-go/ent/follow" + "github.com/lysand-org/versia-go/ent/note" + "github.com/lysand-org/versia-go/ent/schema" + "github.com/lysand-org/versia-go/ent/servermetadata" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// The init function reads all schema descriptors with runtime code +// (default values, validators, hooks and policies) and stitches it +// to their package variables. +func init() { + attachmentMixin := schema.Attachment{}.Mixin() + attachmentMixinFields0 := attachmentMixin[0].Fields() + _ = attachmentMixinFields0 + attachmentFields := schema.Attachment{}.Fields() + _ = attachmentFields + // attachmentDescURI is the schema descriptor for uri field. + attachmentDescURI := attachmentMixinFields0[2].Descriptor() + // attachment.URIValidator is a validator for the "uri" field. It is called by the builders before save. + attachment.URIValidator = attachmentDescURI.Validators[0].(func(string) error) + // attachmentDescExtensions is the schema descriptor for extensions field. + attachmentDescExtensions := attachmentMixinFields0[3].Descriptor() + // attachment.DefaultExtensions holds the default value on creation for the extensions field. + attachment.DefaultExtensions = attachmentDescExtensions.Default.(lysand.Extensions) + // attachmentDescCreatedAt is the schema descriptor for created_at field. + attachmentDescCreatedAt := attachmentMixinFields0[4].Descriptor() + // attachment.DefaultCreatedAt holds the default value on creation for the created_at field. 
+ attachment.DefaultCreatedAt = attachmentDescCreatedAt.Default.(func() time.Time) + // attachmentDescUpdatedAt is the schema descriptor for updated_at field. + attachmentDescUpdatedAt := attachmentMixinFields0[5].Descriptor() + // attachment.DefaultUpdatedAt holds the default value on creation for the updated_at field. + attachment.DefaultUpdatedAt = attachmentDescUpdatedAt.Default.(func() time.Time) + // attachment.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + attachment.UpdateDefaultUpdatedAt = attachmentDescUpdatedAt.UpdateDefault.(func() time.Time) + // attachmentDescDescription is the schema descriptor for description field. + attachmentDescDescription := attachmentFields[0].Descriptor() + // attachment.DescriptionValidator is a validator for the "description" field. It is called by the builders before save. + attachment.DescriptionValidator = attachmentDescDescription.Validators[0].(func(string) error) + // attachmentDescID is the schema descriptor for id field. + attachmentDescID := attachmentMixinFields0[0].Descriptor() + // attachment.DefaultID holds the default value on creation for the id field. + attachment.DefaultID = attachmentDescID.Default.(func() uuid.UUID) + followMixin := schema.Follow{}.Mixin() + followMixinFields0 := followMixin[0].Fields() + _ = followMixinFields0 + followFields := schema.Follow{}.Fields() + _ = followFields + // followDescURI is the schema descriptor for uri field. + followDescURI := followMixinFields0[2].Descriptor() + // follow.URIValidator is a validator for the "uri" field. It is called by the builders before save. + follow.URIValidator = followDescURI.Validators[0].(func(string) error) + // followDescExtensions is the schema descriptor for extensions field. + followDescExtensions := followMixinFields0[3].Descriptor() + // follow.DefaultExtensions holds the default value on creation for the extensions field. 
+ follow.DefaultExtensions = followDescExtensions.Default.(lysand.Extensions) + // followDescCreatedAt is the schema descriptor for created_at field. + followDescCreatedAt := followMixinFields0[4].Descriptor() + // follow.DefaultCreatedAt holds the default value on creation for the created_at field. + follow.DefaultCreatedAt = followDescCreatedAt.Default.(func() time.Time) + // followDescUpdatedAt is the schema descriptor for updated_at field. + followDescUpdatedAt := followMixinFields0[5].Descriptor() + // follow.DefaultUpdatedAt holds the default value on creation for the updated_at field. + follow.DefaultUpdatedAt = followDescUpdatedAt.Default.(func() time.Time) + // follow.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + follow.UpdateDefaultUpdatedAt = followDescUpdatedAt.UpdateDefault.(func() time.Time) + // followDescID is the schema descriptor for id field. + followDescID := followMixinFields0[0].Descriptor() + // follow.DefaultID holds the default value on creation for the id field. + follow.DefaultID = followDescID.Default.(func() uuid.UUID) + noteMixin := schema.Note{}.Mixin() + noteMixinFields0 := noteMixin[0].Fields() + _ = noteMixinFields0 + noteFields := schema.Note{}.Fields() + _ = noteFields + // noteDescURI is the schema descriptor for uri field. + noteDescURI := noteMixinFields0[2].Descriptor() + // note.URIValidator is a validator for the "uri" field. It is called by the builders before save. + note.URIValidator = noteDescURI.Validators[0].(func(string) error) + // noteDescExtensions is the schema descriptor for extensions field. + noteDescExtensions := noteMixinFields0[3].Descriptor() + // note.DefaultExtensions holds the default value on creation for the extensions field. + note.DefaultExtensions = noteDescExtensions.Default.(lysand.Extensions) + // noteDescCreatedAt is the schema descriptor for created_at field. 
+ noteDescCreatedAt := noteMixinFields0[4].Descriptor() + // note.DefaultCreatedAt holds the default value on creation for the created_at field. + note.DefaultCreatedAt = noteDescCreatedAt.Default.(func() time.Time) + // noteDescUpdatedAt is the schema descriptor for updated_at field. + noteDescUpdatedAt := noteMixinFields0[5].Descriptor() + // note.DefaultUpdatedAt holds the default value on creation for the updated_at field. + note.DefaultUpdatedAt = noteDescUpdatedAt.Default.(func() time.Time) + // note.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + note.UpdateDefaultUpdatedAt = noteDescUpdatedAt.UpdateDefault.(func() time.Time) + // noteDescSubject is the schema descriptor for subject field. + noteDescSubject := noteFields[0].Descriptor() + // note.SubjectValidator is a validator for the "subject" field. It is called by the builders before save. + note.SubjectValidator = noteDescSubject.Validators[0].(func(string) error) + // noteDescIsSensitive is the schema descriptor for isSensitive field. + noteDescIsSensitive := noteFields[2].Descriptor() + // note.DefaultIsSensitive holds the default value on creation for the isSensitive field. + note.DefaultIsSensitive = noteDescIsSensitive.Default.(bool) + // noteDescID is the schema descriptor for id field. + noteDescID := noteMixinFields0[0].Descriptor() + // note.DefaultID holds the default value on creation for the id field. + note.DefaultID = noteDescID.Default.(func() uuid.UUID) + servermetadataMixin := schema.ServerMetadata{}.Mixin() + servermetadataMixinFields0 := servermetadataMixin[0].Fields() + _ = servermetadataMixinFields0 + servermetadataFields := schema.ServerMetadata{}.Fields() + _ = servermetadataFields + // servermetadataDescURI is the schema descriptor for uri field. + servermetadataDescURI := servermetadataMixinFields0[2].Descriptor() + // servermetadata.URIValidator is a validator for the "uri" field. It is called by the builders before save. 
+ servermetadata.URIValidator = servermetadataDescURI.Validators[0].(func(string) error) + // servermetadataDescExtensions is the schema descriptor for extensions field. + servermetadataDescExtensions := servermetadataMixinFields0[3].Descriptor() + // servermetadata.DefaultExtensions holds the default value on creation for the extensions field. + servermetadata.DefaultExtensions = servermetadataDescExtensions.Default.(lysand.Extensions) + // servermetadataDescCreatedAt is the schema descriptor for created_at field. + servermetadataDescCreatedAt := servermetadataMixinFields0[4].Descriptor() + // servermetadata.DefaultCreatedAt holds the default value on creation for the created_at field. + servermetadata.DefaultCreatedAt = servermetadataDescCreatedAt.Default.(func() time.Time) + // servermetadataDescUpdatedAt is the schema descriptor for updated_at field. + servermetadataDescUpdatedAt := servermetadataMixinFields0[5].Descriptor() + // servermetadata.DefaultUpdatedAt holds the default value on creation for the updated_at field. + servermetadata.DefaultUpdatedAt = servermetadataDescUpdatedAt.Default.(func() time.Time) + // servermetadata.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + servermetadata.UpdateDefaultUpdatedAt = servermetadataDescUpdatedAt.UpdateDefault.(func() time.Time) + // servermetadataDescName is the schema descriptor for name field. + servermetadataDescName := servermetadataFields[0].Descriptor() + // servermetadata.NameValidator is a validator for the "name" field. It is called by the builders before save. + servermetadata.NameValidator = servermetadataDescName.Validators[0].(func(string) error) + // servermetadataDescVersion is the schema descriptor for version field. + servermetadataDescVersion := servermetadataFields[2].Descriptor() + // servermetadata.VersionValidator is a validator for the "version" field. It is called by the builders before save. 
+ servermetadata.VersionValidator = servermetadataDescVersion.Validators[0].(func(string) error) + // servermetadataDescSupportedExtensions is the schema descriptor for supportedExtensions field. + servermetadataDescSupportedExtensions := servermetadataFields[3].Descriptor() + // servermetadata.DefaultSupportedExtensions holds the default value on creation for the supportedExtensions field. + servermetadata.DefaultSupportedExtensions = servermetadataDescSupportedExtensions.Default.([]string) + // servermetadataDescID is the schema descriptor for id field. + servermetadataDescID := servermetadataMixinFields0[0].Descriptor() + // servermetadata.DefaultID holds the default value on creation for the id field. + servermetadata.DefaultID = servermetadataDescID.Default.(func() uuid.UUID) + userMixin := schema.User{}.Mixin() + userMixinFields0 := userMixin[0].Fields() + _ = userMixinFields0 + userFields := schema.User{}.Fields() + _ = userFields + // userDescURI is the schema descriptor for uri field. + userDescURI := userMixinFields0[2].Descriptor() + // user.URIValidator is a validator for the "uri" field. It is called by the builders before save. + user.URIValidator = userDescURI.Validators[0].(func(string) error) + // userDescExtensions is the schema descriptor for extensions field. + userDescExtensions := userMixinFields0[3].Descriptor() + // user.DefaultExtensions holds the default value on creation for the extensions field. + user.DefaultExtensions = userDescExtensions.Default.(lysand.Extensions) + // userDescCreatedAt is the schema descriptor for created_at field. + userDescCreatedAt := userMixinFields0[4].Descriptor() + // user.DefaultCreatedAt holds the default value on creation for the created_at field. + user.DefaultCreatedAt = userDescCreatedAt.Default.(func() time.Time) + // userDescUpdatedAt is the schema descriptor for updated_at field. 
+ userDescUpdatedAt := userMixinFields0[5].Descriptor() + // user.DefaultUpdatedAt holds the default value on creation for the updated_at field. + user.DefaultUpdatedAt = userDescUpdatedAt.Default.(func() time.Time) + // user.UpdateDefaultUpdatedAt holds the default value on update for the updated_at field. + user.UpdateDefaultUpdatedAt = userDescUpdatedAt.UpdateDefault.(func() time.Time) + // userDescUsername is the schema descriptor for username field. + userDescUsername := userFields[0].Descriptor() + // user.UsernameValidator is a validator for the "username" field. It is called by the builders before save. + user.UsernameValidator = func() func(string) error { + validators := userDescUsername.Validators + fns := [...]func(string) error{ + validators[0].(func(string) error), + validators[1].(func(string) error), + } + return func(username string) error { + for _, fn := range fns { + if err := fn(username); err != nil { + return err + } + } + return nil + } + }() + // userDescDisplayName is the schema descriptor for displayName field. + userDescDisplayName := userFields[2].Descriptor() + // user.DisplayNameValidator is a validator for the "displayName" field. It is called by the builders before save. + user.DisplayNameValidator = userDescDisplayName.Validators[0].(func(string) error) + // userDescIndexable is the schema descriptor for indexable field. + userDescIndexable := userFields[6].Descriptor() + // user.DefaultIndexable holds the default value on creation for the indexable field. + user.DefaultIndexable = userDescIndexable.Default.(bool) + // userDescFields is the schema descriptor for fields field. + userDescFields := userFields[8].Descriptor() + // user.DefaultFields holds the default value on creation for the fields field. + user.DefaultFields = userDescFields.Default.([]lysand.Field) + // userDescInbox is the schema descriptor for inbox field. + userDescInbox := userFields[9].Descriptor() + // user.InboxValidator is a validator for the "inbox" field. 
It is called by the builders before save. + user.InboxValidator = userDescInbox.Validators[0].(func(string) error) + // userDescFeatured is the schema descriptor for featured field. + userDescFeatured := userFields[10].Descriptor() + // user.FeaturedValidator is a validator for the "featured" field. It is called by the builders before save. + user.FeaturedValidator = userDescFeatured.Validators[0].(func(string) error) + // userDescFollowers is the schema descriptor for followers field. + userDescFollowers := userFields[11].Descriptor() + // user.FollowersValidator is a validator for the "followers" field. It is called by the builders before save. + user.FollowersValidator = userDescFollowers.Validators[0].(func(string) error) + // userDescFollowing is the schema descriptor for following field. + userDescFollowing := userFields[12].Descriptor() + // user.FollowingValidator is a validator for the "following" field. It is called by the builders before save. + user.FollowingValidator = userDescFollowing.Validators[0].(func(string) error) + // userDescOutbox is the schema descriptor for outbox field. + userDescOutbox := userFields[13].Descriptor() + // user.OutboxValidator is a validator for the "outbox" field. It is called by the builders before save. + user.OutboxValidator = userDescOutbox.Validators[0].(func(string) error) + // userDescID is the schema descriptor for id field. + userDescID := userMixinFields0[0].Descriptor() + // user.DefaultID holds the default value on creation for the id field. + user.DefaultID = userDescID.Default.(func() uuid.UUID) +} diff --git a/ent/runtime/runtime.go b/ent/runtime/runtime.go new file mode 100644 index 0000000..dabd245 --- /dev/null +++ b/ent/runtime/runtime.go @@ -0,0 +1,10 @@ +// Code generated by ent, DO NOT EDIT. + +package runtime + +// The schema-stitching logic is generated in github.com/lysand-org/versia-go/ent/runtime.go + +const ( + Version = "v0.13.1" // Version of ent codegen. 
	Sum = "h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE=" // Sum of ent codegen.
)

// ==== file: ent/schema/attachment.go (new) ====

package schema

import (
	"entgo.io/ent"
	"entgo.io/ent/schema/edge"
	"entgo.io/ent/schema/field"
)

// Attachment is the ent schema for a media attachment (image, video, …) on a note.
type Attachment struct{ ent.Schema }

// Fields declares the attachment columns.
//
// NOTE(review): ent/runtime.go wires validators and defaults into this slice by
// positional index, so the field ORDER must not change without re-running the
// ent code generator.
func (Attachment) Fields() []ent.Field {
	return []ent.Field{
		// Human-readable description / alt text, capped at 384 characters.
		field.String("description").MaxLen(384),
		// Content hash and byte size of the stored blob.
		field.Bytes("sha256"),
		field.Int("size"),

		// Optional presentation metadata; nil when not applicable to the media type.
		field.String("blurhash").Optional().Nillable(),
		field.Int("height").Optional().Nillable(),
		field.Int("width").Optional().Nillable(),
		field.Int("fps").Optional().Nillable(),

		field.String("mimeType"),
	}
}

// Edges: every attachment belongs to exactly one authoring user.
func (Attachment) Edges() []ent.Edge {
	return []ent.Edge{
		edge.To("author", User.Type).Unique().Required(),
	}
}

// Mixin adds the shared Lysand entity columns (id, isRemote, uri, extensions, timestamps).
func (Attachment) Mixin() []ent.Mixin {
	return []ent.Mixin{LysandEntityMixin{}}
}

// ==== file: ent/schema/follow.go (new) ====

package schema

import (
	"entgo.io/ent"
	"entgo.io/ent/schema/edge"
	"entgo.io/ent/schema/field"
	"entgo.io/ent/schema/index"
)

// Follow is the ent schema for a follow relation between two users.
type Follow struct{ ent.Schema }

// Fields: a follow starts out "pending" until it is accepted by the followee.
func (Follow) Fields() []ent.Field {
	return []ent.Field{
		field.Enum("status").Values("pending", "accepted").Default("pending"),
	}
}

// Edges: the requesting user (follower) and the target user (followee).
func (Follow) Edges() []ent.Edge {
	return []ent.Edge{
		edge.To("follower", User.Type).Unique().Required(),
		edge.To("followee", User.Type).Unique().Required(),
	}
}

// Indexes: at most one follow row per (follower, followee) pair.
func (Follow) Indexes() []ent.Index {
	return []ent.Index{
		index.Edges("follower", "followee").Unique(),
	}
}

// Mixin adds the shared Lysand entity columns.
func (Follow) Mixin() []ent.Mixin {
	return []ent.Mixin{LysandEntityMixin{}}
}

// ==== file: ent/schema/image.go (new, begins on the following line) ====
+package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/field" +) + +type Image struct{ ent.Schema } + +func (Image) Fields() []ent.Field { + return []ent.Field{ + field.String("url"), + field.String("mimeType"), + } +} + +func (Image) Edges() []ent.Edge { + return nil +} diff --git a/ent/schema/lysand_entity.go b/ent/schema/lysand_entity.go new file mode 100644 index 0000000..a8665a7 --- /dev/null +++ b/ent/schema/lysand_entity.go @@ -0,0 +1,40 @@ +package schema + +import ( + "net/url" + "time" + + "entgo.io/ent" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/mixin" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +type LysandEntityMixin struct{ mixin.Schema } + +var _ ent.Mixin = (*LysandEntityMixin)(nil) + +func (LysandEntityMixin) Fields() []ent.Field { + return []ent.Field{ + field.UUID("id", uuid.UUID{}). + Default(uuid.New). + Immutable(), + field.Bool("isRemote"), + field.String("uri").Validate(ValidateURI), + + field.JSON("extensions", lysand.Extensions{}).Default(lysand.Extensions{}), + + field.Time("created_at"). + Immutable(). + Default(time.Now), + field.Time("updated_at"). + Default(time.Now). 
+ UpdateDefault(time.Now), + } +} + +func ValidateURI(s string) error { + _, err := url.Parse(s) + return err +} diff --git a/ent/schema/note.go b/ent/schema/note.go new file mode 100644 index 0000000..418e91b --- /dev/null +++ b/ent/schema/note.go @@ -0,0 +1,32 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" +) + +type Note struct{ ent.Schema } + +func (Note) Fields() []ent.Field { + return []ent.Field{ + field.String("subject").MaxLen(384).Optional().Nillable(), + field.String("content"), + + field.Bool("isSensitive").Default(false), + field.Enum("visibility").Values("public", "unlisted", "followers", "direct").Default("public"), + } +} + +func (Note) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("author", User.Type).Unique().Required(), + edge.To("mentions", User.Type), + + edge.To("attachments", Attachment.Type), + } +} + +func (Note) Mixin() []ent.Mixin { + return []ent.Mixin{LysandEntityMixin{}} +} diff --git a/ent/schema/server_metadata.go b/ent/schema/server_metadata.go new file mode 100644 index 0000000..1cb2d95 --- /dev/null +++ b/ent/schema/server_metadata.go @@ -0,0 +1,43 @@ +package schema + +import ( + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "entgo.io/ent/schema/index" +) + +type ServerMetadata struct{ ent.Schema } + +func (ServerMetadata) Fields() []ent.Field { + return []ent.Field{ + field.String("name"). + NotEmpty(), + field.String("description"). + Optional(). + Nillable(), + + field.String("version"). + NotEmpty(), + + field.Strings("supportedExtensions"). 
+ Default([]string{}), + } +} + +func (ServerMetadata) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("follower", User.Type).Unique().Required(), + edge.To("followee", User.Type).Unique().Required(), + } +} + +func (ServerMetadata) Indexes() []ent.Index { + return []ent.Index{ + index.Edges("follower", "followee").Unique(), + } +} + +func (ServerMetadata) Mixin() []ent.Mixin { + return []ent.Mixin{LysandEntityMixin{}} +} diff --git a/ent/schema/user.go b/ent/schema/user.go new file mode 100644 index 0000000..129007a --- /dev/null +++ b/ent/schema/user.go @@ -0,0 +1,65 @@ +package schema + +import ( + "crypto/ed25519" + "errors" + "regexp" + + "entgo.io/ent" + "entgo.io/ent/schema/edge" + "entgo.io/ent/schema/field" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +var ( + ErrUsernameInvalid = errors.New("username must match ^[a-z0-9_-]+$") + usernameRegex = regexp.MustCompile("^[a-z0-9_-]+$") +) + +type User struct{ ent.Schema } + +func (User) Fields() []ent.Field { + return []ent.Field{ + field.String("username").Unique().MaxLen(32).Validate(ValidateUsername), + field.Bytes("passwordHash").Optional().Nillable(), + + field.String("displayName").MaxLen(256).Optional().Nillable(), + field.String("biography").Optional().Nillable(), + + field.Bytes("publicKey").GoType(ed25519.PublicKey([]byte{})), + field.Bytes("privateKey").GoType(ed25519.PrivateKey([]byte{})).Optional(), + + field.Bool("indexable").Default(true), + field.Enum("privacyLevel").Values("public", "restricted", "private").Default("public"), + + field.JSON("fields", []lysand.Field{}).Default([]lysand.Field{}), + + field.String("inbox").Validate(ValidateURI), + + // Collections + field.String("featured").Validate(ValidateURI), + field.String("followers").Validate(ValidateURI), + field.String("following").Validate(ValidateURI), + field.String("outbox").Validate(ValidateURI), + } +} + +func (User) Edges() []ent.Edge { + return []ent.Edge{ + edge.To("avatarImage", Image.Type).Unique(), + 
edge.To("headerImage", Image.Type).Unique(), + + edge.From("authoredNotes", Note.Type).Ref("author"), + edge.From("mentionedNotes", Note.Type).Ref("mentions"), + } +} + +func (User) Mixin() []ent.Mixin { return []ent.Mixin{LysandEntityMixin{}} } + +func ValidateUsername(username string) error { + if !usernameRegex.MatchString(username) { + return ErrUsernameInvalid + } + + return nil +} diff --git a/ent/servermetadata.go b/ent/servermetadata.go new file mode 100644 index 0000000..7c50c8d --- /dev/null +++ b/ent/servermetadata.go @@ -0,0 +1,275 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/servermetadata" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// ServerMetadata is the model entity for the ServerMetadata schema. +type ServerMetadata struct { + config `json:"-"` + // ID of the ent. + ID uuid.UUID `json:"id,omitempty"` + // IsRemote holds the value of the "isRemote" field. + IsRemote bool `json:"isRemote,omitempty"` + // URI holds the value of the "uri" field. + URI string `json:"uri,omitempty"` + // Extensions holds the value of the "extensions" field. + Extensions lysand.Extensions `json:"extensions,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Name holds the value of the "name" field. + Name string `json:"name,omitempty"` + // Description holds the value of the "description" field. + Description *string `json:"description,omitempty"` + // Version holds the value of the "version" field. + Version string `json:"version,omitempty"` + // SupportedExtensions holds the value of the "supportedExtensions" field. 
	// NOTE(review): generated by ent — do not hand-edit; regenerate from ent/schema.
	SupportedExtensions []string `json:"supportedExtensions,omitempty"`
	// Edges holds the relations/edges for other nodes in the graph.
	// The values are being populated by the ServerMetadataQuery when eager-loading is set.
	Edges ServerMetadataEdges `json:"edges"`
	// NOTE(review): follower/followee foreign keys on server metadata look like a
	// copy-paste from the Follow schema — verify intent and regenerate.
	server_metadata_follower *uuid.UUID
	server_metadata_followee *uuid.UUID
	selectValues             sql.SelectValues
}

// ServerMetadataEdges holds the relations/edges for other nodes in the graph.
type ServerMetadataEdges struct {
	// Follower holds the value of the follower edge.
	Follower *User `json:"follower,omitempty"`
	// Followee holds the value of the followee edge.
	Followee *User `json:"followee,omitempty"`
	// loadedTypes holds the information for reporting if a
	// type was loaded (or requested) in eager-loading or not.
	loadedTypes [2]bool
}

// FollowerOrErr returns the Follower value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e ServerMetadataEdges) FollowerOrErr() (*User, error) {
	if e.Follower != nil {
		return e.Follower, nil
	} else if e.loadedTypes[0] {
		return nil, &NotFoundError{label: user.Label}
	}
	return nil, &NotLoadedError{edge: "follower"}
}

// FolloweeOrErr returns the Followee value or an error if the edge
// was not loaded in eager-loading, or loaded but was not found.
func (e ServerMetadataEdges) FolloweeOrErr() (*User, error) {
	if e.Followee != nil {
		return e.Followee, nil
	} else if e.loadedTypes[1] {
		return nil, &NotFoundError{label: user.Label}
	}
	return nil, &NotLoadedError{edge: "followee"}
}

// scanValues returns the types for scanning values from sql.Rows.
func (*ServerMetadata) scanValues(columns []string) ([]any, error) {
	values := make([]any, len(columns))
	for i := range columns {
		switch columns[i] {
		// JSON columns are scanned as raw bytes and unmarshalled in assignValues.
		case servermetadata.FieldExtensions, servermetadata.FieldSupportedExtensions:
			values[i] = new([]byte)
		case servermetadata.FieldIsRemote:
			values[i] = new(sql.NullBool)
		case servermetadata.FieldURI, servermetadata.FieldName, servermetadata.FieldDescription, servermetadata.FieldVersion:
			values[i] = new(sql.NullString)
		case servermetadata.FieldCreatedAt, servermetadata.FieldUpdatedAt:
			values[i] = new(sql.NullTime)
		case servermetadata.FieldID:
			values[i] = new(uuid.UUID)
		case servermetadata.ForeignKeys[0]: // server_metadata_follower
			values[i] = &sql.NullScanner{S: new(uuid.UUID)}
		case servermetadata.ForeignKeys[1]: // server_metadata_followee
			values[i] = &sql.NullScanner{S: new(uuid.UUID)}
		default:
			values[i] = new(sql.UnknownType)
		}
	}
	return values, nil
}

// assignValues assigns the values that were returned from sql.Rows (after scanning)
// to the ServerMetadata fields.
func (sm *ServerMetadata) assignValues(columns []string, values []any) error {
	if m, n := len(values), len(columns); m < n {
		return fmt.Errorf("mismatch number of scan values: %d != %d", m, n)
	}
	for i := range columns {
		switch columns[i] {
		case servermetadata.FieldID:
			if value, ok := values[i].(*uuid.UUID); !ok {
				return fmt.Errorf("unexpected type %T for field id", values[i])
			} else if value != nil {
				sm.ID = *value
			}
		case servermetadata.FieldIsRemote:
			if value, ok := values[i].(*sql.NullBool); !ok {
				return fmt.Errorf("unexpected type %T for field isRemote", values[i])
			} else if value.Valid {
				sm.IsRemote = value.Bool
			}
		case servermetadata.FieldURI:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field uri", values[i])
			} else if value.Valid {
				sm.URI = value.String
			}
		case servermetadata.FieldExtensions:
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field extensions", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &sm.Extensions); err != nil {
					return fmt.Errorf("unmarshal field extensions: %w", err)
				}
			}
		case servermetadata.FieldCreatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field created_at", values[i])
			} else if value.Valid {
				sm.CreatedAt = value.Time
			}
		case servermetadata.FieldUpdatedAt:
			if value, ok := values[i].(*sql.NullTime); !ok {
				return fmt.Errorf("unexpected type %T for field updated_at", values[i])
			} else if value.Valid {
				sm.UpdatedAt = value.Time
			}
		case servermetadata.FieldName:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field name", values[i])
			} else if value.Valid {
				sm.Name = value.String
			}
		case servermetadata.FieldDescription:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field description", values[i])
			} else if value.Valid {
				sm.Description = new(string)
				*sm.Description = value.String
			}
		case servermetadata.FieldVersion:
			if value, ok := values[i].(*sql.NullString); !ok {
				return fmt.Errorf("unexpected type %T for field version", values[i])
			} else if value.Valid {
				sm.Version = value.String
			}
		case servermetadata.FieldSupportedExtensions:
			if value, ok := values[i].(*[]byte); !ok {
				return fmt.Errorf("unexpected type %T for field supportedExtensions", values[i])
			} else if value != nil && len(*value) > 0 {
				if err := json.Unmarshal(*value, &sm.SupportedExtensions); err != nil {
					return fmt.Errorf("unmarshal field supportedExtensions: %w", err)
				}
			}
		case servermetadata.ForeignKeys[0]:
			if value, ok := values[i].(*sql.NullScanner); !ok {
				return fmt.Errorf("unexpected type %T for field server_metadata_follower", values[i])
			} else if value.Valid {
				sm.server_metadata_follower = new(uuid.UUID)
				*sm.server_metadata_follower = *value.S.(*uuid.UUID)
			}
		case servermetadata.ForeignKeys[1]:
			if value, ok := values[i].(*sql.NullScanner); !ok {
				return fmt.Errorf("unexpected type %T for field server_metadata_followee", values[i])
			} else if value.Valid {
				sm.server_metadata_followee = new(uuid.UUID)
				*sm.server_metadata_followee = *value.S.(*uuid.UUID)
			}
		default:
			sm.selectValues.Set(columns[i], values[i])
		}
	}
	return nil
}

// Value returns the ent.Value that was dynamically selected and assigned to the ServerMetadata.
// This includes values selected through modifiers, order, etc.
func (sm *ServerMetadata) Value(name string) (ent.Value, error) {
	return sm.selectValues.Get(name)
}

// QueryFollower queries the "follower" edge of the ServerMetadata entity.
func (sm *ServerMetadata) QueryFollower() *UserQuery {
	return NewServerMetadataClient(sm.config).QueryFollower(sm)
}

// QueryFollowee queries the "followee" edge of the ServerMetadata entity.
func (sm *ServerMetadata) QueryFollowee() *UserQuery {
	return NewServerMetadataClient(sm.config).QueryFollowee(sm)
}

// Update returns a builder for updating this ServerMetadata.
// Note that you need to call ServerMetadata.Unwrap() before calling this method if this ServerMetadata
// was returned from a transaction, and the transaction was committed or rolled back.
func (sm *ServerMetadata) Update() *ServerMetadataUpdateOne {
	return NewServerMetadataClient(sm.config).UpdateOne(sm)
}

// Unwrap unwraps the ServerMetadata entity that was returned from a transaction after it was closed,
// so that all future queries will be executed through the driver which created the transaction.
func (sm *ServerMetadata) Unwrap() *ServerMetadata {
	_tx, ok := sm.config.driver.(*txDriver)
	if !ok {
		panic("ent: ServerMetadata is not a transactional entity")
	}
	sm.config.driver = _tx.drv
	return sm
}

// String implements the fmt.Stringer.
func (sm *ServerMetadata) String() string {
	var builder strings.Builder
	builder.WriteString("ServerMetadata(")
	builder.WriteString(fmt.Sprintf("id=%v, ", sm.ID))
	builder.WriteString("isRemote=")
	builder.WriteString(fmt.Sprintf("%v", sm.IsRemote))
	builder.WriteString(", ")
	builder.WriteString("uri=")
	builder.WriteString(sm.URI)
	builder.WriteString(", ")
	builder.WriteString("extensions=")
	builder.WriteString(fmt.Sprintf("%v", sm.Extensions))
	builder.WriteString(", ")
	builder.WriteString("created_at=")
	builder.WriteString(sm.CreatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("updated_at=")
	builder.WriteString(sm.UpdatedAt.Format(time.ANSIC))
	builder.WriteString(", ")
	builder.WriteString("name=")
	builder.WriteString(sm.Name)
	builder.WriteString(", ")
	if v := sm.Description; v != nil {
		builder.WriteString("description=")
		builder.WriteString(*v)
	}
	builder.WriteString(", ")
	builder.WriteString("version=")
	builder.WriteString(sm.Version)
	builder.WriteString(", ")
	builder.WriteString("supportedExtensions=")
	builder.WriteString(fmt.Sprintf("%v", sm.SupportedExtensions))
	builder.WriteByte(')')
	return builder.String()
}

// ServerMetadataSlice is a parsable slice of ServerMetadata.
type ServerMetadataSlice []*ServerMetadata

// ==== file: ent/servermetadata/servermetadata.go (new) ====
// Code generated by ent, DO NOT EDIT.

package servermetadata

import (
	"time"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"github.com/google/uuid"
	"github.com/lysand-org/versia-go/pkg/lysand"
)

const (
	// Label holds the string label denoting the servermetadata type in the database.
	Label = "server_metadata"
	// FieldID holds the string denoting the id field in the database.
	FieldID = "id"
	// FieldIsRemote holds the string denoting the isremote field in the database.
	FieldIsRemote = "is_remote"
	// FieldURI holds the string denoting the uri field in the database.
	FieldURI = "uri"
	// FieldExtensions holds the string denoting the extensions field in the database.
	FieldExtensions = "extensions"
	// FieldCreatedAt holds the string denoting the created_at field in the database.
	FieldCreatedAt = "created_at"
	// FieldUpdatedAt holds the string denoting the updated_at field in the database.
	FieldUpdatedAt = "updated_at"
	// FieldName holds the string denoting the name field in the database.
	FieldName = "name"
	// FieldDescription holds the string denoting the description field in the database.
	FieldDescription = "description"
	// FieldVersion holds the string denoting the version field in the database.
	FieldVersion = "version"
	// FieldSupportedExtensions holds the string denoting the supportedextensions field in the database.
	// NOTE(review): generated by ent — do not hand-edit; regenerate from ent/schema.
	FieldSupportedExtensions = "supported_extensions"
	// EdgeFollower holds the string denoting the follower edge name in mutations.
	EdgeFollower = "follower"
	// EdgeFollowee holds the string denoting the followee edge name in mutations.
	EdgeFollowee = "followee"
	// Table holds the table name of the servermetadata in the database.
	Table = "server_metadata"
	// FollowerTable is the table that holds the follower relation/edge.
	FollowerTable = "server_metadata"
	// FollowerInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	FollowerInverseTable = "users"
	// FollowerColumn is the table column denoting the follower relation/edge.
	FollowerColumn = "server_metadata_follower"
	// FolloweeTable is the table that holds the followee relation/edge.
	FolloweeTable = "server_metadata"
	// FolloweeInverseTable is the table name for the User entity.
	// It exists in this package in order to avoid circular dependency with the "user" package.
	FolloweeInverseTable = "users"
	// FolloweeColumn is the table column denoting the followee relation/edge.
	FolloweeColumn = "server_metadata_followee"
)

// Columns holds all SQL columns for servermetadata fields.
var Columns = []string{
	FieldID,
	FieldIsRemote,
	FieldURI,
	FieldExtensions,
	FieldCreatedAt,
	FieldUpdatedAt,
	FieldName,
	FieldDescription,
	FieldVersion,
	FieldSupportedExtensions,
}

// ForeignKeys holds the SQL foreign-keys that are owned by the "server_metadata"
// table and are not defined as standalone fields in the schema.
// NOTE(review): these FKs exist because schema.ServerMetadata declares
// follower/followee edges — apparently copy-pasted from Follow; verify.
var ForeignKeys = []string{
	"server_metadata_follower",
	"server_metadata_followee",
}

// ValidColumn reports if the column name is valid (part of the table columns).
func ValidColumn(column string) bool {
	for i := range Columns {
		if column == Columns[i] {
			return true
		}
	}
	for i := range ForeignKeys {
		if column == ForeignKeys[i] {
			return true
		}
	}
	return false
}

var (
	// URIValidator is a validator for the "uri" field. It is called by the builders before save.
	URIValidator func(string) error
	// DefaultExtensions holds the default value on creation for the "extensions" field.
	DefaultExtensions lysand.Extensions
	// DefaultCreatedAt holds the default value on creation for the "created_at" field.
	DefaultCreatedAt func() time.Time
	// DefaultUpdatedAt holds the default value on creation for the "updated_at" field.
	DefaultUpdatedAt func() time.Time
	// UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field.
	UpdateDefaultUpdatedAt func() time.Time
	// NameValidator is a validator for the "name" field. It is called by the builders before save.
	NameValidator func(string) error
	// VersionValidator is a validator for the "version" field. It is called by the builders before save.
	VersionValidator func(string) error
	// DefaultSupportedExtensions holds the default value on creation for the "supportedExtensions" field.
	DefaultSupportedExtensions []string
	// DefaultID holds the default value on creation for the "id" field.
	DefaultID func() uuid.UUID
)

// OrderOption defines the ordering options for the ServerMetadata queries.
type OrderOption func(*sql.Selector)

// ByID orders the results by the id field.
func ByID(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldID, opts...).ToFunc()
}

// ByIsRemote orders the results by the isRemote field.
func ByIsRemote(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldIsRemote, opts...).ToFunc()
}

// ByURI orders the results by the uri field.
func ByURI(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldURI, opts...).ToFunc()
}

// ByCreatedAt orders the results by the created_at field.
func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldCreatedAt, opts...).ToFunc()
}

// ByUpdatedAt orders the results by the updated_at field.
func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc()
}

// ByName orders the results by the name field.
func ByName(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldName, opts...).ToFunc()
}

// ByDescription orders the results by the description field.
func ByDescription(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldDescription, opts...).ToFunc()
}

// ByVersion orders the results by the version field.
func ByVersion(opts ...sql.OrderTermOption) OrderOption {
	return sql.OrderByField(FieldVersion, opts...).ToFunc()
}

// ByFollowerField orders the results by follower field.
func ByFollowerField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newFollowerStep(), sql.OrderByField(field, opts...))
	}
}

// ByFolloweeField orders the results by followee field.
func ByFolloweeField(field string, opts ...sql.OrderTermOption) OrderOption {
	return func(s *sql.Selector) {
		sqlgraph.OrderByNeighborTerms(s, newFolloweeStep(), sql.OrderByField(field, opts...))
	}
}
func newFollowerStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(FollowerInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, false, FollowerTable, FollowerColumn),
	)
}
func newFolloweeStep() *sqlgraph.Step {
	return sqlgraph.NewStep(
		sqlgraph.From(Table, FieldID),
		sqlgraph.To(FolloweeInverseTable, FieldID),
		sqlgraph.Edge(sqlgraph.M2O, false, FolloweeTable, FolloweeColumn),
	)
}

// ==== file: ent/servermetadata/where.go (new) ====
// Code generated by ent, DO NOT EDIT.

package servermetadata

import (
	"time"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"github.com/google/uuid"
	"github.com/lysand-org/versia-go/ent/predicate"
)

// ID filters vertices based on their ID field.
func ID(id uuid.UUID) predicate.ServerMetadata {
	return predicate.ServerMetadata(sql.FieldEQ(FieldID, id))
}

// IDEQ applies the EQ predicate on the ID field.
func IDEQ(id uuid.UUID) predicate.ServerMetadata {
	return predicate.ServerMetadata(sql.FieldEQ(FieldID, id))
}

// IDNEQ applies the NEQ predicate on the ID field.
func IDNEQ(id uuid.UUID) predicate.ServerMetadata {
	return predicate.ServerMetadata(sql.FieldNEQ(FieldID, id))
}

// IDIn applies the In predicate on the ID field.
func IDIn(ids ...uuid.UUID) predicate.ServerMetadata {
	return predicate.ServerMetadata(sql.FieldIn(FieldID, ids...))
}

// IDNotIn applies the NotIn predicate on the ID field.
func IDNotIn(ids ...uuid.UUID) predicate.ServerMetadata {
	return predicate.ServerMetadata(sql.FieldNotIn(FieldID, ids...))
}

// IDGT applies the GT predicate on the ID field.
func IDGT(id uuid.UUID) predicate.ServerMetadata {
	return predicate.ServerMetadata(sql.FieldGT(FieldID, id))
}

// IDGTE applies the GTE predicate on the ID field.
func IDGTE(id uuid.UUID) predicate.ServerMetadata {
	return predicate.ServerMetadata(sql.FieldGTE(FieldID, id))
}

// IDLT applies the LT predicate on the ID field.
func IDLT(id uuid.UUID) predicate.ServerMetadata {
	return predicate.ServerMetadata(sql.FieldLT(FieldID, id))
}

// IDLTE applies the LTE predicate on the ID field.
func IDLTE(id uuid.UUID) predicate.ServerMetadata {
	return predicate.ServerMetadata(sql.FieldLTE(FieldID, id))
}

// IsRemote applies equality check predicate on the "isRemote" field. It's identical to IsRemoteEQ.
func IsRemote(v bool) predicate.ServerMetadata {
	return predicate.ServerMetadata(sql.FieldEQ(FieldIsRemote, v))
}

// URI applies equality check predicate on the "uri" field. It's identical to URIEQ.
func URI(v string) predicate.ServerMetadata {
	return predicate.ServerMetadata(sql.FieldEQ(FieldURI, v))
}

// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ.
func CreatedAt(v time.Time) predicate.ServerMetadata {
	return predicate.ServerMetadata(sql.FieldEQ(FieldCreatedAt, v))
}

// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ.
func UpdatedAt(v time.Time) predicate.ServerMetadata {
	return predicate.ServerMetadata(sql.FieldEQ(FieldUpdatedAt, v))
}

// Name applies equality check predicate on the "name" field. It's identical to NameEQ.
func Name(v string) predicate.ServerMetadata {
	return predicate.ServerMetadata(sql.FieldEQ(FieldName, v))
}

// Description applies equality check predicate on the "description" field. It's identical to DescriptionEQ.
+func Description(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldEQ(FieldDescription, v)) +} + +// Version applies equality check predicate on the "version" field. It's identical to VersionEQ. +func Version(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldEQ(FieldVersion, v)) +} + +// IsRemoteEQ applies the EQ predicate on the "isRemote" field. +func IsRemoteEQ(v bool) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldEQ(FieldIsRemote, v)) +} + +// IsRemoteNEQ applies the NEQ predicate on the "isRemote" field. +func IsRemoteNEQ(v bool) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldNEQ(FieldIsRemote, v)) +} + +// URIEQ applies the EQ predicate on the "uri" field. +func URIEQ(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldEQ(FieldURI, v)) +} + +// URINEQ applies the NEQ predicate on the "uri" field. +func URINEQ(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldNEQ(FieldURI, v)) +} + +// URIIn applies the In predicate on the "uri" field. +func URIIn(vs ...string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldIn(FieldURI, vs...)) +} + +// URINotIn applies the NotIn predicate on the "uri" field. +func URINotIn(vs ...string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldNotIn(FieldURI, vs...)) +} + +// URIGT applies the GT predicate on the "uri" field. +func URIGT(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldGT(FieldURI, v)) +} + +// URIGTE applies the GTE predicate on the "uri" field. +func URIGTE(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldGTE(FieldURI, v)) +} + +// URILT applies the LT predicate on the "uri" field. +func URILT(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldLT(FieldURI, v)) +} + +// URILTE applies the LTE predicate on the "uri" field. 
+func URILTE(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldLTE(FieldURI, v)) +} + +// URIContains applies the Contains predicate on the "uri" field. +func URIContains(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldContains(FieldURI, v)) +} + +// URIHasPrefix applies the HasPrefix predicate on the "uri" field. +func URIHasPrefix(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldHasPrefix(FieldURI, v)) +} + +// URIHasSuffix applies the HasSuffix predicate on the "uri" field. +func URIHasSuffix(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldHasSuffix(FieldURI, v)) +} + +// URIEqualFold applies the EqualFold predicate on the "uri" field. +func URIEqualFold(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldEqualFold(FieldURI, v)) +} + +// URIContainsFold applies the ContainsFold predicate on the "uri" field. +func URIContainsFold(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldContainsFold(FieldURI, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. 
+func CreatedAtGT(v time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. +func CreatedAtGTE(v time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. 
+func UpdatedAtGTE(v time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// NameEQ applies the EQ predicate on the "name" field. +func NameEQ(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldEQ(FieldName, v)) +} + +// NameNEQ applies the NEQ predicate on the "name" field. +func NameNEQ(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldNEQ(FieldName, v)) +} + +// NameIn applies the In predicate on the "name" field. +func NameIn(vs ...string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldIn(FieldName, vs...)) +} + +// NameNotIn applies the NotIn predicate on the "name" field. +func NameNotIn(vs ...string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldNotIn(FieldName, vs...)) +} + +// NameGT applies the GT predicate on the "name" field. +func NameGT(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldGT(FieldName, v)) +} + +// NameGTE applies the GTE predicate on the "name" field. +func NameGTE(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldGTE(FieldName, v)) +} + +// NameLT applies the LT predicate on the "name" field. +func NameLT(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldLT(FieldName, v)) +} + +// NameLTE applies the LTE predicate on the "name" field. 
+func NameLTE(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldLTE(FieldName, v)) +} + +// NameContains applies the Contains predicate on the "name" field. +func NameContains(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldContains(FieldName, v)) +} + +// NameHasPrefix applies the HasPrefix predicate on the "name" field. +func NameHasPrefix(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldHasPrefix(FieldName, v)) +} + +// NameHasSuffix applies the HasSuffix predicate on the "name" field. +func NameHasSuffix(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldHasSuffix(FieldName, v)) +} + +// NameEqualFold applies the EqualFold predicate on the "name" field. +func NameEqualFold(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldEqualFold(FieldName, v)) +} + +// NameContainsFold applies the ContainsFold predicate on the "name" field. +func NameContainsFold(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldContainsFold(FieldName, v)) +} + +// DescriptionEQ applies the EQ predicate on the "description" field. +func DescriptionEQ(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldEQ(FieldDescription, v)) +} + +// DescriptionNEQ applies the NEQ predicate on the "description" field. +func DescriptionNEQ(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldNEQ(FieldDescription, v)) +} + +// DescriptionIn applies the In predicate on the "description" field. +func DescriptionIn(vs ...string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldIn(FieldDescription, vs...)) +} + +// DescriptionNotIn applies the NotIn predicate on the "description" field. 
+func DescriptionNotIn(vs ...string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldNotIn(FieldDescription, vs...)) +} + +// DescriptionGT applies the GT predicate on the "description" field. +func DescriptionGT(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldGT(FieldDescription, v)) +} + +// DescriptionGTE applies the GTE predicate on the "description" field. +func DescriptionGTE(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldGTE(FieldDescription, v)) +} + +// DescriptionLT applies the LT predicate on the "description" field. +func DescriptionLT(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldLT(FieldDescription, v)) +} + +// DescriptionLTE applies the LTE predicate on the "description" field. +func DescriptionLTE(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldLTE(FieldDescription, v)) +} + +// DescriptionContains applies the Contains predicate on the "description" field. +func DescriptionContains(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldContains(FieldDescription, v)) +} + +// DescriptionHasPrefix applies the HasPrefix predicate on the "description" field. +func DescriptionHasPrefix(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldHasPrefix(FieldDescription, v)) +} + +// DescriptionHasSuffix applies the HasSuffix predicate on the "description" field. +func DescriptionHasSuffix(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldHasSuffix(FieldDescription, v)) +} + +// DescriptionIsNil applies the IsNil predicate on the "description" field. +func DescriptionIsNil() predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldIsNull(FieldDescription)) +} + +// DescriptionNotNil applies the NotNil predicate on the "description" field. 
+func DescriptionNotNil() predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldNotNull(FieldDescription)) +} + +// DescriptionEqualFold applies the EqualFold predicate on the "description" field. +func DescriptionEqualFold(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldEqualFold(FieldDescription, v)) +} + +// DescriptionContainsFold applies the ContainsFold predicate on the "description" field. +func DescriptionContainsFold(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldContainsFold(FieldDescription, v)) +} + +// VersionEQ applies the EQ predicate on the "version" field. +func VersionEQ(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldEQ(FieldVersion, v)) +} + +// VersionNEQ applies the NEQ predicate on the "version" field. +func VersionNEQ(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldNEQ(FieldVersion, v)) +} + +// VersionIn applies the In predicate on the "version" field. +func VersionIn(vs ...string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldIn(FieldVersion, vs...)) +} + +// VersionNotIn applies the NotIn predicate on the "version" field. +func VersionNotIn(vs ...string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldNotIn(FieldVersion, vs...)) +} + +// VersionGT applies the GT predicate on the "version" field. +func VersionGT(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldGT(FieldVersion, v)) +} + +// VersionGTE applies the GTE predicate on the "version" field. +func VersionGTE(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldGTE(FieldVersion, v)) +} + +// VersionLT applies the LT predicate on the "version" field. +func VersionLT(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldLT(FieldVersion, v)) +} + +// VersionLTE applies the LTE predicate on the "version" field. 
+func VersionLTE(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldLTE(FieldVersion, v)) +} + +// VersionContains applies the Contains predicate on the "version" field. +func VersionContains(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldContains(FieldVersion, v)) +} + +// VersionHasPrefix applies the HasPrefix predicate on the "version" field. +func VersionHasPrefix(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldHasPrefix(FieldVersion, v)) +} + +// VersionHasSuffix applies the HasSuffix predicate on the "version" field. +func VersionHasSuffix(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldHasSuffix(FieldVersion, v)) +} + +// VersionEqualFold applies the EqualFold predicate on the "version" field. +func VersionEqualFold(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldEqualFold(FieldVersion, v)) +} + +// VersionContainsFold applies the ContainsFold predicate on the "version" field. +func VersionContainsFold(v string) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.FieldContainsFold(FieldVersion, v)) +} + +// HasFollower applies the HasEdge predicate on the "follower" edge. +func HasFollower() predicate.ServerMetadata { + return predicate.ServerMetadata(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, FollowerTable, FollowerColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasFollowerWith applies the HasEdge predicate on the "follower" edge with a given conditions (other predicates). 
+func HasFollowerWith(preds ...predicate.User) predicate.ServerMetadata { + return predicate.ServerMetadata(func(s *sql.Selector) { + step := newFollowerStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasFollowee applies the HasEdge predicate on the "followee" edge. +func HasFollowee() predicate.ServerMetadata { + return predicate.ServerMetadata(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, FolloweeTable, FolloweeColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasFolloweeWith applies the HasEdge predicate on the "followee" edge with a given conditions (other predicates). +func HasFolloweeWith(preds ...predicate.User) predicate.ServerMetadata { + return predicate.ServerMetadata(func(s *sql.Selector) { + step := newFolloweeStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.ServerMetadata) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.ServerMetadata) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. +func Not(p predicate.ServerMetadata) predicate.ServerMetadata { + return predicate.ServerMetadata(sql.NotPredicates(p)) +} diff --git a/ent/servermetadata_create.go b/ent/servermetadata_create.go new file mode 100644 index 0000000..3071138 --- /dev/null +++ b/ent/servermetadata_create.go @@ -0,0 +1,1035 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/servermetadata" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// ServerMetadataCreate is the builder for creating a ServerMetadata entity. +type ServerMetadataCreate struct { + config + mutation *ServerMetadataMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetIsRemote sets the "isRemote" field. +func (smc *ServerMetadataCreate) SetIsRemote(b bool) *ServerMetadataCreate { + smc.mutation.SetIsRemote(b) + return smc +} + +// SetURI sets the "uri" field. +func (smc *ServerMetadataCreate) SetURI(s string) *ServerMetadataCreate { + smc.mutation.SetURI(s) + return smc +} + +// SetExtensions sets the "extensions" field. +func (smc *ServerMetadataCreate) SetExtensions(l lysand.Extensions) *ServerMetadataCreate { + smc.mutation.SetExtensions(l) + return smc +} + +// SetCreatedAt sets the "created_at" field. +func (smc *ServerMetadataCreate) SetCreatedAt(t time.Time) *ServerMetadataCreate { + smc.mutation.SetCreatedAt(t) + return smc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (smc *ServerMetadataCreate) SetNillableCreatedAt(t *time.Time) *ServerMetadataCreate { + if t != nil { + smc.SetCreatedAt(*t) + } + return smc +} + +// SetUpdatedAt sets the "updated_at" field. +func (smc *ServerMetadataCreate) SetUpdatedAt(t time.Time) *ServerMetadataCreate { + smc.mutation.SetUpdatedAt(t) + return smc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. +func (smc *ServerMetadataCreate) SetNillableUpdatedAt(t *time.Time) *ServerMetadataCreate { + if t != nil { + smc.SetUpdatedAt(*t) + } + return smc +} + +// SetName sets the "name" field. 
+func (smc *ServerMetadataCreate) SetName(s string) *ServerMetadataCreate { + smc.mutation.SetName(s) + return smc +} + +// SetDescription sets the "description" field. +func (smc *ServerMetadataCreate) SetDescription(s string) *ServerMetadataCreate { + smc.mutation.SetDescription(s) + return smc +} + +// SetNillableDescription sets the "description" field if the given value is not nil. +func (smc *ServerMetadataCreate) SetNillableDescription(s *string) *ServerMetadataCreate { + if s != nil { + smc.SetDescription(*s) + } + return smc +} + +// SetVersion sets the "version" field. +func (smc *ServerMetadataCreate) SetVersion(s string) *ServerMetadataCreate { + smc.mutation.SetVersion(s) + return smc +} + +// SetSupportedExtensions sets the "supportedExtensions" field. +func (smc *ServerMetadataCreate) SetSupportedExtensions(s []string) *ServerMetadataCreate { + smc.mutation.SetSupportedExtensions(s) + return smc +} + +// SetID sets the "id" field. +func (smc *ServerMetadataCreate) SetID(u uuid.UUID) *ServerMetadataCreate { + smc.mutation.SetID(u) + return smc +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (smc *ServerMetadataCreate) SetNillableID(u *uuid.UUID) *ServerMetadataCreate { + if u != nil { + smc.SetID(*u) + } + return smc +} + +// SetFollowerID sets the "follower" edge to the User entity by ID. +func (smc *ServerMetadataCreate) SetFollowerID(id uuid.UUID) *ServerMetadataCreate { + smc.mutation.SetFollowerID(id) + return smc +} + +// SetFollower sets the "follower" edge to the User entity. +func (smc *ServerMetadataCreate) SetFollower(u *User) *ServerMetadataCreate { + return smc.SetFollowerID(u.ID) +} + +// SetFolloweeID sets the "followee" edge to the User entity by ID. +func (smc *ServerMetadataCreate) SetFolloweeID(id uuid.UUID) *ServerMetadataCreate { + smc.mutation.SetFolloweeID(id) + return smc +} + +// SetFollowee sets the "followee" edge to the User entity. 
+func (smc *ServerMetadataCreate) SetFollowee(u *User) *ServerMetadataCreate { + return smc.SetFolloweeID(u.ID) +} + +// Mutation returns the ServerMetadataMutation object of the builder. +func (smc *ServerMetadataCreate) Mutation() *ServerMetadataMutation { + return smc.mutation +} + +// Save creates the ServerMetadata in the database. +func (smc *ServerMetadataCreate) Save(ctx context.Context) (*ServerMetadata, error) { + smc.defaults() + return withHooks(ctx, smc.sqlSave, smc.mutation, smc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (smc *ServerMetadataCreate) SaveX(ctx context.Context) *ServerMetadata { + v, err := smc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (smc *ServerMetadataCreate) Exec(ctx context.Context) error { + _, err := smc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (smc *ServerMetadataCreate) ExecX(ctx context.Context) { + if err := smc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (smc *ServerMetadataCreate) defaults() { + if _, ok := smc.mutation.Extensions(); !ok { + v := servermetadata.DefaultExtensions + smc.mutation.SetExtensions(v) + } + if _, ok := smc.mutation.CreatedAt(); !ok { + v := servermetadata.DefaultCreatedAt() + smc.mutation.SetCreatedAt(v) + } + if _, ok := smc.mutation.UpdatedAt(); !ok { + v := servermetadata.DefaultUpdatedAt() + smc.mutation.SetUpdatedAt(v) + } + if _, ok := smc.mutation.SupportedExtensions(); !ok { + v := servermetadata.DefaultSupportedExtensions + smc.mutation.SetSupportedExtensions(v) + } + if _, ok := smc.mutation.ID(); !ok { + v := servermetadata.DefaultID() + smc.mutation.SetID(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (smc *ServerMetadataCreate) check() error { + if _, ok := smc.mutation.IsRemote(); !ok { + return &ValidationError{Name: "isRemote", err: errors.New(`ent: missing required field "ServerMetadata.isRemote"`)} + } + if _, ok := smc.mutation.URI(); !ok { + return &ValidationError{Name: "uri", err: errors.New(`ent: missing required field "ServerMetadata.uri"`)} + } + if v, ok := smc.mutation.URI(); ok { + if err := servermetadata.URIValidator(v); err != nil { + return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "ServerMetadata.uri": %w`, err)} + } + } + if _, ok := smc.mutation.Extensions(); !ok { + return &ValidationError{Name: "extensions", err: errors.New(`ent: missing required field "ServerMetadata.extensions"`)} + } + if _, ok := smc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "ServerMetadata.created_at"`)} + } + if _, ok := smc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "ServerMetadata.updated_at"`)} + } + if _, ok := smc.mutation.Name(); !ok { + return &ValidationError{Name: "name", err: errors.New(`ent: missing required field "ServerMetadata.name"`)} + } + if v, ok := smc.mutation.Name(); ok { + if err := servermetadata.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ServerMetadata.name": %w`, err)} + } + } + if _, ok := smc.mutation.Version(); !ok { + return &ValidationError{Name: "version", err: errors.New(`ent: missing required field "ServerMetadata.version"`)} + } + if v, ok := smc.mutation.Version(); ok { + if err := servermetadata.VersionValidator(v); err != nil { + return &ValidationError{Name: "version", err: fmt.Errorf(`ent: validator failed for field "ServerMetadata.version": %w`, err)} + } + } + if _, ok := smc.mutation.SupportedExtensions(); !ok { + return &ValidationError{Name: 
"supportedExtensions", err: errors.New(`ent: missing required field "ServerMetadata.supportedExtensions"`)} + } + if _, ok := smc.mutation.FollowerID(); !ok { + return &ValidationError{Name: "follower", err: errors.New(`ent: missing required edge "ServerMetadata.follower"`)} + } + if _, ok := smc.mutation.FolloweeID(); !ok { + return &ValidationError{Name: "followee", err: errors.New(`ent: missing required edge "ServerMetadata.followee"`)} + } + return nil +} + +func (smc *ServerMetadataCreate) sqlSave(ctx context.Context) (*ServerMetadata, error) { + if err := smc.check(); err != nil { + return nil, err + } + _node, _spec := smc.createSpec() + if err := sqlgraph.CreateNode(ctx, smc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + smc.mutation.id = &_node.ID + smc.mutation.done = true + return _node, nil +} + +func (smc *ServerMetadataCreate) createSpec() (*ServerMetadata, *sqlgraph.CreateSpec) { + var ( + _node = &ServerMetadata{config: smc.config} + _spec = sqlgraph.NewCreateSpec(servermetadata.Table, sqlgraph.NewFieldSpec(servermetadata.FieldID, field.TypeUUID)) + ) + _spec.OnConflict = smc.conflict + if id, ok := smc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = &id + } + if value, ok := smc.mutation.IsRemote(); ok { + _spec.SetField(servermetadata.FieldIsRemote, field.TypeBool, value) + _node.IsRemote = value + } + if value, ok := smc.mutation.URI(); ok { + _spec.SetField(servermetadata.FieldURI, field.TypeString, value) + _node.URI = value + } + if value, ok := smc.mutation.Extensions(); ok { + _spec.SetField(servermetadata.FieldExtensions, field.TypeJSON, value) + _node.Extensions = value + } + if value, ok := smc.mutation.CreatedAt(); ok { + 
_spec.SetField(servermetadata.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := smc.mutation.UpdatedAt(); ok { + _spec.SetField(servermetadata.FieldUpdatedAt, field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := smc.mutation.Name(); ok { + _spec.SetField(servermetadata.FieldName, field.TypeString, value) + _node.Name = value + } + if value, ok := smc.mutation.Description(); ok { + _spec.SetField(servermetadata.FieldDescription, field.TypeString, value) + _node.Description = &value + } + if value, ok := smc.mutation.Version(); ok { + _spec.SetField(servermetadata.FieldVersion, field.TypeString, value) + _node.Version = value + } + if value, ok := smc.mutation.SupportedExtensions(); ok { + _spec.SetField(servermetadata.FieldSupportedExtensions, field.TypeJSON, value) + _node.SupportedExtensions = value + } + if nodes := smc.mutation.FollowerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: servermetadata.FollowerTable, + Columns: []string{servermetadata.FollowerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.server_metadata_follower = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := smc.mutation.FolloweeIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: servermetadata.FolloweeTable, + Columns: []string{servermetadata.FolloweeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.server_metadata_followee = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` 
clause +// of the `INSERT` statement. For example: +// +// client.ServerMetadata.Create(). +// SetIsRemote(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.ServerMetadataUpsert) { +// SetIsRemote(v+v). +// }). +// Exec(ctx) +func (smc *ServerMetadataCreate) OnConflict(opts ...sql.ConflictOption) *ServerMetadataUpsertOne { + smc.conflict = opts + return &ServerMetadataUpsertOne{ + create: smc, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.ServerMetadata.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (smc *ServerMetadataCreate) OnConflictColumns(columns ...string) *ServerMetadataUpsertOne { + smc.conflict = append(smc.conflict, sql.ConflictColumns(columns...)) + return &ServerMetadataUpsertOne{ + create: smc, + } +} + +type ( + // ServerMetadataUpsertOne is the builder for "upsert"-ing + // one ServerMetadata node. + ServerMetadataUpsertOne struct { + create *ServerMetadataCreate + } + + // ServerMetadataUpsert is the "OnConflict" setter. + ServerMetadataUpsert struct { + *sql.UpdateSet + } +) + +// SetIsRemote sets the "isRemote" field. +func (u *ServerMetadataUpsert) SetIsRemote(v bool) *ServerMetadataUpsert { + u.Set(servermetadata.FieldIsRemote, v) + return u +} + +// UpdateIsRemote sets the "isRemote" field to the value that was provided on create. +func (u *ServerMetadataUpsert) UpdateIsRemote() *ServerMetadataUpsert { + u.SetExcluded(servermetadata.FieldIsRemote) + return u +} + +// SetURI sets the "uri" field. +func (u *ServerMetadataUpsert) SetURI(v string) *ServerMetadataUpsert { + u.Set(servermetadata.FieldURI, v) + return u +} + +// UpdateURI sets the "uri" field to the value that was provided on create. 
+func (u *ServerMetadataUpsert) UpdateURI() *ServerMetadataUpsert { + u.SetExcluded(servermetadata.FieldURI) + return u +} + +// SetExtensions sets the "extensions" field. +func (u *ServerMetadataUpsert) SetExtensions(v lysand.Extensions) *ServerMetadataUpsert { + u.Set(servermetadata.FieldExtensions, v) + return u +} + +// UpdateExtensions sets the "extensions" field to the value that was provided on create. +func (u *ServerMetadataUpsert) UpdateExtensions() *ServerMetadataUpsert { + u.SetExcluded(servermetadata.FieldExtensions) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *ServerMetadataUpsert) SetUpdatedAt(v time.Time) *ServerMetadataUpsert { + u.Set(servermetadata.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *ServerMetadataUpsert) UpdateUpdatedAt() *ServerMetadataUpsert { + u.SetExcluded(servermetadata.FieldUpdatedAt) + return u +} + +// SetName sets the "name" field. +func (u *ServerMetadataUpsert) SetName(v string) *ServerMetadataUpsert { + u.Set(servermetadata.FieldName, v) + return u +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *ServerMetadataUpsert) UpdateName() *ServerMetadataUpsert { + u.SetExcluded(servermetadata.FieldName) + return u +} + +// SetDescription sets the "description" field. +func (u *ServerMetadataUpsert) SetDescription(v string) *ServerMetadataUpsert { + u.Set(servermetadata.FieldDescription, v) + return u +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *ServerMetadataUpsert) UpdateDescription() *ServerMetadataUpsert { + u.SetExcluded(servermetadata.FieldDescription) + return u +} + +// ClearDescription clears the value of the "description" field. +func (u *ServerMetadataUpsert) ClearDescription() *ServerMetadataUpsert { + u.SetNull(servermetadata.FieldDescription) + return u +} + +// SetVersion sets the "version" field. 
+func (u *ServerMetadataUpsert) SetVersion(v string) *ServerMetadataUpsert { + u.Set(servermetadata.FieldVersion, v) + return u +} + +// UpdateVersion sets the "version" field to the value that was provided on create. +func (u *ServerMetadataUpsert) UpdateVersion() *ServerMetadataUpsert { + u.SetExcluded(servermetadata.FieldVersion) + return u +} + +// SetSupportedExtensions sets the "supportedExtensions" field. +func (u *ServerMetadataUpsert) SetSupportedExtensions(v []string) *ServerMetadataUpsert { + u.Set(servermetadata.FieldSupportedExtensions, v) + return u +} + +// UpdateSupportedExtensions sets the "supportedExtensions" field to the value that was provided on create. +func (u *ServerMetadataUpsert) UpdateSupportedExtensions() *ServerMetadataUpsert { + u.SetExcluded(servermetadata.FieldSupportedExtensions) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. +// Using this option is equivalent to using: +// +// client.ServerMetadata.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(servermetadata.FieldID) +// }), +// ). +// Exec(ctx) +func (u *ServerMetadataUpsertOne) UpdateNewValues() *ServerMetadataUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.ID(); exists { + s.SetIgnore(servermetadata.FieldID) + } + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(servermetadata.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.ServerMetadata.Create(). +// OnConflict(sql.ResolveWithIgnore()). 
+// Exec(ctx) +func (u *ServerMetadataUpsertOne) Ignore() *ServerMetadataUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *ServerMetadataUpsertOne) DoNothing() *ServerMetadataUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the ServerMetadataCreate.OnConflict +// documentation for more info. +func (u *ServerMetadataUpsertOne) Update(set func(*ServerMetadataUpsert)) *ServerMetadataUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ServerMetadataUpsert{UpdateSet: update}) + })) + return u +} + +// SetIsRemote sets the "isRemote" field. +func (u *ServerMetadataUpsertOne) SetIsRemote(v bool) *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetIsRemote(v) + }) +} + +// UpdateIsRemote sets the "isRemote" field to the value that was provided on create. +func (u *ServerMetadataUpsertOne) UpdateIsRemote() *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateIsRemote() + }) +} + +// SetURI sets the "uri" field. +func (u *ServerMetadataUpsertOne) SetURI(v string) *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetURI(v) + }) +} + +// UpdateURI sets the "uri" field to the value that was provided on create. +func (u *ServerMetadataUpsertOne) UpdateURI() *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateURI() + }) +} + +// SetExtensions sets the "extensions" field. 
+func (u *ServerMetadataUpsertOne) SetExtensions(v lysand.Extensions) *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetExtensions(v) + }) +} + +// UpdateExtensions sets the "extensions" field to the value that was provided on create. +func (u *ServerMetadataUpsertOne) UpdateExtensions() *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateExtensions() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *ServerMetadataUpsertOne) SetUpdatedAt(v time.Time) *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *ServerMetadataUpsertOne) UpdateUpdatedAt() *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetName sets the "name" field. +func (u *ServerMetadataUpsertOne) SetName(v string) *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *ServerMetadataUpsertOne) UpdateName() *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateName() + }) +} + +// SetDescription sets the "description" field. +func (u *ServerMetadataUpsertOne) SetDescription(v string) *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *ServerMetadataUpsertOne) UpdateDescription() *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateDescription() + }) +} + +// ClearDescription clears the value of the "description" field. 
+func (u *ServerMetadataUpsertOne) ClearDescription() *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.ClearDescription() + }) +} + +// SetVersion sets the "version" field. +func (u *ServerMetadataUpsertOne) SetVersion(v string) *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetVersion(v) + }) +} + +// UpdateVersion sets the "version" field to the value that was provided on create. +func (u *ServerMetadataUpsertOne) UpdateVersion() *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateVersion() + }) +} + +// SetSupportedExtensions sets the "supportedExtensions" field. +func (u *ServerMetadataUpsertOne) SetSupportedExtensions(v []string) *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetSupportedExtensions(v) + }) +} + +// UpdateSupportedExtensions sets the "supportedExtensions" field to the value that was provided on create. +func (u *ServerMetadataUpsertOne) UpdateSupportedExtensions() *ServerMetadataUpsertOne { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateSupportedExtensions() + }) +} + +// Exec executes the query. +func (u *ServerMetadataUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for ServerMetadataCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *ServerMetadataUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *ServerMetadataUpsertOne) ID(ctx context.Context) (id uuid.UUID, err error) { + if u.create.driver.Dialect() == dialect.MySQL { + // In case of "ON CONFLICT", there is no way to get back non-numeric ID + // fields from the database since MySQL does not support the RETURNING clause. 
+ return id, errors.New("ent: ServerMetadataUpsertOne.ID is not supported by MySQL driver. Use ServerMetadataUpsertOne.Exec instead") + } + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *ServerMetadataUpsertOne) IDX(ctx context.Context) uuid.UUID { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// ServerMetadataCreateBulk is the builder for creating many ServerMetadata entities in bulk. +type ServerMetadataCreateBulk struct { + config + err error + builders []*ServerMetadataCreate + conflict []sql.ConflictOption +} + +// Save creates the ServerMetadata entities in the database. +func (smcb *ServerMetadataCreateBulk) Save(ctx context.Context) ([]*ServerMetadata, error) { + if smcb.err != nil { + return nil, smcb.err + } + specs := make([]*sqlgraph.CreateSpec, len(smcb.builders)) + nodes := make([]*ServerMetadata, len(smcb.builders)) + mutators := make([]Mutator, len(smcb.builders)) + for i := range smcb.builders { + func(i int, root context.Context) { + builder := smcb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*ServerMetadataMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, smcb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = smcb.conflict + // Invoke the actual operation on the latest mutation in the chain. 
+ if err = sqlgraph.BatchCreate(ctx, smcb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, smcb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (smcb *ServerMetadataCreateBulk) SaveX(ctx context.Context) []*ServerMetadata { + v, err := smcb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (smcb *ServerMetadataCreateBulk) Exec(ctx context.Context) error { + _, err := smcb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (smcb *ServerMetadataCreateBulk) ExecX(ctx context.Context) { + if err := smcb.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.ServerMetadata.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.ServerMetadataUpsert) { +// SetIsRemote(v+v). +// }). +// Exec(ctx) +func (smcb *ServerMetadataCreateBulk) OnConflict(opts ...sql.ConflictOption) *ServerMetadataUpsertBulk { + smcb.conflict = opts + return &ServerMetadataUpsertBulk{ + create: smcb, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.ServerMetadata.Create(). 
+// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (smcb *ServerMetadataCreateBulk) OnConflictColumns(columns ...string) *ServerMetadataUpsertBulk { + smcb.conflict = append(smcb.conflict, sql.ConflictColumns(columns...)) + return &ServerMetadataUpsertBulk{ + create: smcb, + } +} + +// ServerMetadataUpsertBulk is the builder for "upsert"-ing +// a bulk of ServerMetadata nodes. +type ServerMetadataUpsertBulk struct { + create *ServerMetadataCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.ServerMetadata.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(servermetadata.FieldID) +// }), +// ). +// Exec(ctx) +func (u *ServerMetadataUpsertBulk) UpdateNewValues() *ServerMetadataUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.ID(); exists { + s.SetIgnore(servermetadata.FieldID) + } + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(servermetadata.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.ServerMetadata.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *ServerMetadataUpsertBulk) Ignore() *ServerMetadataUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *ServerMetadataUpsertBulk) DoNothing() *ServerMetadataUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. 
See the ServerMetadataCreateBulk.OnConflict +// documentation for more info. +func (u *ServerMetadataUpsertBulk) Update(set func(*ServerMetadataUpsert)) *ServerMetadataUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&ServerMetadataUpsert{UpdateSet: update}) + })) + return u +} + +// SetIsRemote sets the "isRemote" field. +func (u *ServerMetadataUpsertBulk) SetIsRemote(v bool) *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetIsRemote(v) + }) +} + +// UpdateIsRemote sets the "isRemote" field to the value that was provided on create. +func (u *ServerMetadataUpsertBulk) UpdateIsRemote() *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateIsRemote() + }) +} + +// SetURI sets the "uri" field. +func (u *ServerMetadataUpsertBulk) SetURI(v string) *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetURI(v) + }) +} + +// UpdateURI sets the "uri" field to the value that was provided on create. +func (u *ServerMetadataUpsertBulk) UpdateURI() *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateURI() + }) +} + +// SetExtensions sets the "extensions" field. +func (u *ServerMetadataUpsertBulk) SetExtensions(v lysand.Extensions) *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetExtensions(v) + }) +} + +// UpdateExtensions sets the "extensions" field to the value that was provided on create. +func (u *ServerMetadataUpsertBulk) UpdateExtensions() *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateExtensions() + }) +} + +// SetUpdatedAt sets the "updated_at" field. 
+func (u *ServerMetadataUpsertBulk) SetUpdatedAt(v time.Time) *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *ServerMetadataUpsertBulk) UpdateUpdatedAt() *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetName sets the "name" field. +func (u *ServerMetadataUpsertBulk) SetName(v string) *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetName(v) + }) +} + +// UpdateName sets the "name" field to the value that was provided on create. +func (u *ServerMetadataUpsertBulk) UpdateName() *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateName() + }) +} + +// SetDescription sets the "description" field. +func (u *ServerMetadataUpsertBulk) SetDescription(v string) *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetDescription(v) + }) +} + +// UpdateDescription sets the "description" field to the value that was provided on create. +func (u *ServerMetadataUpsertBulk) UpdateDescription() *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateDescription() + }) +} + +// ClearDescription clears the value of the "description" field. +func (u *ServerMetadataUpsertBulk) ClearDescription() *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.ClearDescription() + }) +} + +// SetVersion sets the "version" field. +func (u *ServerMetadataUpsertBulk) SetVersion(v string) *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetVersion(v) + }) +} + +// UpdateVersion sets the "version" field to the value that was provided on create. 
+func (u *ServerMetadataUpsertBulk) UpdateVersion() *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateVersion() + }) +} + +// SetSupportedExtensions sets the "supportedExtensions" field. +func (u *ServerMetadataUpsertBulk) SetSupportedExtensions(v []string) *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.SetSupportedExtensions(v) + }) +} + +// UpdateSupportedExtensions sets the "supportedExtensions" field to the value that was provided on create. +func (u *ServerMetadataUpsertBulk) UpdateSupportedExtensions() *ServerMetadataUpsertBulk { + return u.Update(func(s *ServerMetadataUpsert) { + s.UpdateSupportedExtensions() + }) +} + +// Exec executes the query. +func (u *ServerMetadataUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the ServerMetadataCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for ServerMetadataCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *ServerMetadataUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/servermetadata_delete.go b/ent/servermetadata_delete.go new file mode 100644 index 0000000..47880c9 --- /dev/null +++ b/ent/servermetadata_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/lysand-org/versia-go/ent/predicate" + "github.com/lysand-org/versia-go/ent/servermetadata" +) + +// ServerMetadataDelete is the builder for deleting a ServerMetadata entity. 
+type ServerMetadataDelete struct { + config + hooks []Hook + mutation *ServerMetadataMutation +} + +// Where appends a list predicates to the ServerMetadataDelete builder. +func (smd *ServerMetadataDelete) Where(ps ...predicate.ServerMetadata) *ServerMetadataDelete { + smd.mutation.Where(ps...) + return smd +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (smd *ServerMetadataDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, smd.sqlExec, smd.mutation, smd.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. +func (smd *ServerMetadataDelete) ExecX(ctx context.Context) int { + n, err := smd.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (smd *ServerMetadataDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(servermetadata.Table, sqlgraph.NewFieldSpec(servermetadata.FieldID, field.TypeUUID)) + if ps := smd.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, smd.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + smd.mutation.done = true + return affected, err +} + +// ServerMetadataDeleteOne is the builder for deleting a single ServerMetadata entity. +type ServerMetadataDeleteOne struct { + smd *ServerMetadataDelete +} + +// Where appends a list predicates to the ServerMetadataDelete builder. +func (smdo *ServerMetadataDeleteOne) Where(ps ...predicate.ServerMetadata) *ServerMetadataDeleteOne { + smdo.smd.mutation.Where(ps...) + return smdo +} + +// Exec executes the deletion query. 
+func (smdo *ServerMetadataDeleteOne) Exec(ctx context.Context) error { + n, err := smdo.smd.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{servermetadata.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (smdo *ServerMetadataDeleteOne) ExecX(ctx context.Context) { + if err := smdo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/servermetadata_query.go b/ent/servermetadata_query.go new file mode 100644 index 0000000..48ec4ca --- /dev/null +++ b/ent/servermetadata_query.go @@ -0,0 +1,688 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/predicate" + "github.com/lysand-org/versia-go/ent/servermetadata" + "github.com/lysand-org/versia-go/ent/user" +) + +// ServerMetadataQuery is the builder for querying ServerMetadata entities. +type ServerMetadataQuery struct { + config + ctx *QueryContext + order []servermetadata.OrderOption + inters []Interceptor + predicates []predicate.ServerMetadata + withFollower *UserQuery + withFollowee *UserQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the ServerMetadataQuery builder. +func (smq *ServerMetadataQuery) Where(ps ...predicate.ServerMetadata) *ServerMetadataQuery { + smq.predicates = append(smq.predicates, ps...) + return smq +} + +// Limit the number of records to be returned by this query. +func (smq *ServerMetadataQuery) Limit(limit int) *ServerMetadataQuery { + smq.ctx.Limit = &limit + return smq +} + +// Offset to start from. 
+func (smq *ServerMetadataQuery) Offset(offset int) *ServerMetadataQuery { + smq.ctx.Offset = &offset + return smq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (smq *ServerMetadataQuery) Unique(unique bool) *ServerMetadataQuery { + smq.ctx.Unique = &unique + return smq +} + +// Order specifies how the records should be ordered. +func (smq *ServerMetadataQuery) Order(o ...servermetadata.OrderOption) *ServerMetadataQuery { + smq.order = append(smq.order, o...) + return smq +} + +// QueryFollower chains the current query on the "follower" edge. +func (smq *ServerMetadataQuery) QueryFollower() *UserQuery { + query := (&UserClient{config: smq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := smq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := smq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(servermetadata.Table, servermetadata.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, servermetadata.FollowerTable, servermetadata.FollowerColumn), + ) + fromU = sqlgraph.SetNeighbors(smq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryFollowee chains the current query on the "followee" edge. 
+func (smq *ServerMetadataQuery) QueryFollowee() *UserQuery { + query := (&UserClient{config: smq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := smq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := smq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(servermetadata.Table, servermetadata.FieldID, selector), + sqlgraph.To(user.Table, user.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, servermetadata.FolloweeTable, servermetadata.FolloweeColumn), + ) + fromU = sqlgraph.SetNeighbors(smq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first ServerMetadata entity from the query. +// Returns a *NotFoundError when no ServerMetadata was found. +func (smq *ServerMetadataQuery) First(ctx context.Context) (*ServerMetadata, error) { + nodes, err := smq.Limit(1).All(setContextOp(ctx, smq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{servermetadata.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. +func (smq *ServerMetadataQuery) FirstX(ctx context.Context) *ServerMetadata { + node, err := smq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first ServerMetadata ID from the query. +// Returns a *NotFoundError when no ServerMetadata ID was found. +func (smq *ServerMetadataQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = smq.Limit(1).IDs(setContextOp(ctx, smq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{servermetadata.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. 
+func (smq *ServerMetadataQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := smq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single ServerMetadata entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one ServerMetadata entity is found. +// Returns a *NotFoundError when no ServerMetadata entities are found. +func (smq *ServerMetadataQuery) Only(ctx context.Context) (*ServerMetadata, error) { + nodes, err := smq.Limit(2).All(setContextOp(ctx, smq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{servermetadata.Label} + default: + return nil, &NotSingularError{servermetadata.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (smq *ServerMetadataQuery) OnlyX(ctx context.Context) *ServerMetadata { + node, err := smq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only ServerMetadata ID in the query. +// Returns a *NotSingularError when more than one ServerMetadata ID is found. +// Returns a *NotFoundError when no entities are found. +func (smq *ServerMetadataQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = smq.Limit(2).IDs(setContextOp(ctx, smq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{servermetadata.Label} + default: + err = &NotSingularError{servermetadata.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (smq *ServerMetadataQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := smq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of ServerMetadataSlice. 
+func (smq *ServerMetadataQuery) All(ctx context.Context) ([]*ServerMetadata, error) { + ctx = setContextOp(ctx, smq.ctx, "All") + if err := smq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*ServerMetadata, *ServerMetadataQuery]() + return withInterceptors[[]*ServerMetadata](ctx, smq, qr, smq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (smq *ServerMetadataQuery) AllX(ctx context.Context) []*ServerMetadata { + nodes, err := smq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of ServerMetadata IDs. +func (smq *ServerMetadataQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if smq.ctx.Unique == nil && smq.path != nil { + smq.Unique(true) + } + ctx = setContextOp(ctx, smq.ctx, "IDs") + if err = smq.Select(servermetadata.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (smq *ServerMetadataQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := smq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (smq *ServerMetadataQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, smq.ctx, "Count") + if err := smq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, smq, querierCount[*ServerMetadataQuery](), smq.inters) +} + +// CountX is like Count, but panics if an error occurs. +func (smq *ServerMetadataQuery) CountX(ctx context.Context) int { + count, err := smq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. 
+func (smq *ServerMetadataQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, smq.ctx, "Exist") + switch _, err := smq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (smq *ServerMetadataQuery) ExistX(ctx context.Context) bool { + exist, err := smq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the ServerMetadataQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (smq *ServerMetadataQuery) Clone() *ServerMetadataQuery { + if smq == nil { + return nil + } + return &ServerMetadataQuery{ + config: smq.config, + ctx: smq.ctx.Clone(), + order: append([]servermetadata.OrderOption{}, smq.order...), + inters: append([]Interceptor{}, smq.inters...), + predicates: append([]predicate.ServerMetadata{}, smq.predicates...), + withFollower: smq.withFollower.Clone(), + withFollowee: smq.withFollowee.Clone(), + // clone intermediate query. + sql: smq.sql.Clone(), + path: smq.path, + } +} + +// WithFollower tells the query-builder to eager-load the nodes that are connected to +// the "follower" edge. The optional arguments are used to configure the query builder of the edge. +func (smq *ServerMetadataQuery) WithFollower(opts ...func(*UserQuery)) *ServerMetadataQuery { + query := (&UserClient{config: smq.config}).Query() + for _, opt := range opts { + opt(query) + } + smq.withFollower = query + return smq +} + +// WithFollowee tells the query-builder to eager-load the nodes that are connected to +// the "followee" edge. The optional arguments are used to configure the query builder of the edge. 
func (smq *ServerMetadataQuery) WithFollowee(opts ...func(*UserQuery)) *ServerMetadataQuery {
	query := (&UserClient{config: smq.config}).Query()
	for _, opt := range opts {
		opt(query)
	}
	smq.withFollowee = query
	return smq
}

// GroupBy is used to group vertices by one or more fields/columns.
// It is often used with aggregate functions, like: count, max, mean, min, sum.
//
// Example:
//
//	var v []struct {
//		IsRemote bool `json:"isRemote,omitempty"`
//		Count int `json:"count,omitempty"`
//	}
//
//	client.ServerMetadata.Query().
//		GroupBy(servermetadata.FieldIsRemote).
//		Aggregate(ent.Count()).
//		Scan(ctx, &v)
func (smq *ServerMetadataQuery) GroupBy(field string, fields ...string) *ServerMetadataGroupBy {
	smq.ctx.Fields = append([]string{field}, fields...)
	grbuild := &ServerMetadataGroupBy{build: smq}
	grbuild.flds = &smq.ctx.Fields
	grbuild.label = servermetadata.Label
	grbuild.scan = grbuild.Scan
	return grbuild
}

// Select allows the selection one or more fields/columns for the given query,
// instead of selecting all fields in the entity.
//
// Example:
//
//	var v []struct {
//		IsRemote bool `json:"isRemote,omitempty"`
//	}
//
//	client.ServerMetadata.Query().
//		Select(servermetadata.FieldIsRemote).
//		Scan(ctx, &v)
func (smq *ServerMetadataQuery) Select(fields ...string) *ServerMetadataSelect {
	smq.ctx.Fields = append(smq.ctx.Fields, fields...)
	sbuild := &ServerMetadataSelect{ServerMetadataQuery: smq}
	sbuild.label = servermetadata.Label
	sbuild.flds, sbuild.scan = &smq.ctx.Fields, sbuild.Scan
	return sbuild
}

// Aggregate returns a ServerMetadataSelect configured with the given aggregations.
func (smq *ServerMetadataQuery) Aggregate(fns ...AggregateFunc) *ServerMetadataSelect {
	return smq.Select().Aggregate(fns...)
}

// prepareQuery runs interceptor traversals, validates the selected columns,
// and resolves a path-based (edge-traversal) query into its SQL selector.
func (smq *ServerMetadataQuery) prepareQuery(ctx context.Context) error {
	for _, inter := range smq.inters {
		if inter == nil {
			return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)")
		}
		if trv, ok := inter.(Traverser); ok {
			if err := trv.Traverse(ctx, smq); err != nil {
				return err
			}
		}
	}
	for _, f := range smq.ctx.Fields {
		if !servermetadata.ValidColumn(f) {
			return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)}
		}
	}
	if smq.path != nil {
		prev, err := smq.path(ctx)
		if err != nil {
			return err
		}
		smq.sql = prev
	}
	return nil
}

// sqlAll executes the query against the driver, scans the rows into
// ServerMetadata nodes, and eager-loads the requested follower/followee edges.
func (smq *ServerMetadataQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*ServerMetadata, error) {
	var (
		nodes       = []*ServerMetadata{}
		withFKs     = smq.withFKs
		_spec       = smq.querySpec()
		loadedTypes = [2]bool{
			smq.withFollower != nil,
			smq.withFollowee != nil,
		}
	)
	// Edge FKs must be selected when either edge is eager-loaded.
	if smq.withFollower != nil || smq.withFollowee != nil {
		withFKs = true
	}
	if withFKs {
		_spec.Node.Columns = append(_spec.Node.Columns, servermetadata.ForeignKeys...)
	}
	_spec.ScanValues = func(columns []string) ([]any, error) {
		return (*ServerMetadata).scanValues(nil, columns)
	}
	_spec.Assign = func(columns []string, values []any) error {
		node := &ServerMetadata{config: smq.config}
		nodes = append(nodes, node)
		node.Edges.loadedTypes = loadedTypes
		return node.assignValues(columns, values)
	}
	for i := range hooks {
		hooks[i](ctx, _spec)
	}
	if err := sqlgraph.QueryNodes(ctx, smq.driver, _spec); err != nil {
		return nil, err
	}
	if len(nodes) == 0 {
		return nodes, nil
	}
	if query := smq.withFollower; query != nil {
		if err := smq.loadFollower(ctx, query, nodes, nil,
			func(n *ServerMetadata, e *User) { n.Edges.Follower = e }); err != nil {
			return nil, err
		}
	}
	if query := smq.withFollowee; query != nil {
		if err := smq.loadFollowee(ctx, query, nodes, nil,
			func(n *ServerMetadata, e *User) { n.Edges.Followee = e }); err != nil {
			return nil, err
		}
	}
	return nodes, nil
}

// loadFollower batch-loads the "follower" User for each node via a single
// IN-query keyed by the server_metadata_follower foreign key.
func (smq *ServerMetadataQuery) loadFollower(ctx context.Context, query *UserQuery, nodes []*ServerMetadata, init func(*ServerMetadata), assign func(*ServerMetadata, *User)) error {
	ids := make([]uuid.UUID, 0, len(nodes))
	nodeids := make(map[uuid.UUID][]*ServerMetadata)
	for i := range nodes {
		if nodes[i].server_metadata_follower == nil {
			continue
		}
		fk := *nodes[i].server_metadata_follower
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(user.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "server_metadata_follower" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}

// loadFollowee batch-loads the "followee" User for each node via a single
// IN-query keyed by the server_metadata_followee foreign key.
func (smq *ServerMetadataQuery) loadFollowee(ctx context.Context, query *UserQuery, nodes []*ServerMetadata, init func(*ServerMetadata), assign func(*ServerMetadata, *User)) error {
	ids := make([]uuid.UUID, 0, len(nodes))
	nodeids := make(map[uuid.UUID][]*ServerMetadata)
	for i := range nodes {
		if nodes[i].server_metadata_followee == nil {
			continue
		}
		fk := *nodes[i].server_metadata_followee
		if _, ok := nodeids[fk]; !ok {
			ids = append(ids, fk)
		}
		nodeids[fk] = append(nodeids[fk], nodes[i])
	}
	if len(ids) == 0 {
		return nil
	}
	query.Where(user.IDIn(ids...))
	neighbors, err := query.All(ctx)
	if err != nil {
		return err
	}
	for _, n := range neighbors {
		nodes, ok := nodeids[n.ID]
		if !ok {
			return fmt.Errorf(`unexpected foreign-key "server_metadata_followee" returned %v`, n.ID)
		}
		for i := range nodes {
			assign(nodes[i], n)
		}
	}
	return nil
}

// sqlCount builds a COUNT query from the current spec.
func (smq *ServerMetadataQuery) sqlCount(ctx context.Context) (int, error) {
	_spec := smq.querySpec()
	_spec.Node.Columns = smq.ctx.Fields
	if len(smq.ctx.Fields) > 0 {
		_spec.Unique = smq.ctx.Unique != nil && *smq.ctx.Unique
	}
	return sqlgraph.CountNodes(ctx, smq.driver, _spec)
}

// querySpec translates the builder state (fields, predicates, ordering,
// limit/offset, uniqueness) into a sqlgraph.QuerySpec.
func (smq *ServerMetadataQuery) querySpec() *sqlgraph.QuerySpec {
	_spec := sqlgraph.NewQuerySpec(servermetadata.Table, servermetadata.Columns, sqlgraph.NewFieldSpec(servermetadata.FieldID, field.TypeUUID))
	_spec.From = smq.sql
	if unique := smq.ctx.Unique; unique != nil {
		_spec.Unique = *unique
	} else if smq.path != nil {
		_spec.Unique = true
	}
	if fields := smq.ctx.Fields; len(fields) > 0 {
		// The ID column is always selected first.
		_spec.Node.Columns = make([]string, 0, len(fields))
		_spec.Node.Columns = append(_spec.Node.Columns, servermetadata.FieldID)
		for i := range fields {
			if fields[i] != servermetadata.FieldID {
				_spec.Node.Columns = append(_spec.Node.Columns, fields[i])
			}
		}
	}
	if ps := smq.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if limit := smq.ctx.Limit; limit != nil {
		_spec.Limit = *limit
	}
	if offset := smq.ctx.Offset; offset != nil {
		_spec.Offset = *offset
	}
	if ps := smq.order; len(ps) > 0 {
		_spec.Order = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	return _spec
}

// sqlQuery builds the raw SQL selector for this query, applying predicates,
// ordering, DISTINCT, and limit/offset.
func (smq *ServerMetadataQuery) sqlQuery(ctx context.Context) *sql.Selector {
	builder := sql.Dialect(smq.driver.Dialect())
	t1 := builder.Table(servermetadata.Table)
	columns := smq.ctx.Fields
	if len(columns) == 0 {
		columns = servermetadata.Columns
	}
	selector := builder.Select(t1.Columns(columns...)...).From(t1)
	if smq.sql != nil {
		selector = smq.sql
		selector.Select(selector.Columns(columns...)...)
	}
	if smq.ctx.Unique != nil && *smq.ctx.Unique {
		selector.Distinct()
	}
	for _, p := range smq.predicates {
		p(selector)
	}
	for _, p := range smq.order {
		p(selector)
	}
	if offset := smq.ctx.Offset; offset != nil {
		// limit is mandatory for offset clause. We start
		// with default value, and override it below if needed.
		selector.Offset(*offset).Limit(math.MaxInt32)
	}
	if limit := smq.ctx.Limit; limit != nil {
		selector.Limit(*limit)
	}
	return selector
}

// ServerMetadataGroupBy is the group-by builder for ServerMetadata entities.
type ServerMetadataGroupBy struct {
	selector
	build *ServerMetadataQuery
}

// Aggregate adds the given aggregation functions to the group-by query.
func (smgb *ServerMetadataGroupBy) Aggregate(fns ...AggregateFunc) *ServerMetadataGroupBy {
	smgb.fns = append(smgb.fns, fns...)
	return smgb
}

// Scan applies the selector query and scans the result into the given value.
func (smgb *ServerMetadataGroupBy) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, smgb.build.ctx, "GroupBy")
	if err := smgb.build.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*ServerMetadataQuery, *ServerMetadataGroupBy](ctx, smgb.build, smgb, smgb.build.inters, v)
}

// sqlScan builds the GROUP BY selector (grouped columns plus aggregations),
// executes it, and scans the rows into v.
func (smgb *ServerMetadataGroupBy) sqlScan(ctx context.Context, root *ServerMetadataQuery, v any) error {
	selector := root.sqlQuery(ctx).Select()
	aggregation := make([]string, 0, len(smgb.fns))
	for _, fn := range smgb.fns {
		aggregation = append(aggregation, fn(selector))
	}
	// Default the selection to group-by columns + aggregations if nothing
	// was selected explicitly.
	if len(selector.SelectedColumns()) == 0 {
		columns := make([]string, 0, len(*smgb.flds)+len(smgb.fns))
		for _, f := range *smgb.flds {
			columns = append(columns, selector.C(f))
		}
		columns = append(columns, aggregation...)
		selector.Select(columns...)
	}
	selector.GroupBy(selector.Columns(*smgb.flds...)...)
	if err := selector.Err(); err != nil {
		return err
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := smgb.build.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}

// ServerMetadataSelect is the builder for selecting fields of ServerMetadata entities.
type ServerMetadataSelect struct {
	*ServerMetadataQuery
	selector
}

// Aggregate adds the given aggregation functions to the selector query.
func (sms *ServerMetadataSelect) Aggregate(fns ...AggregateFunc) *ServerMetadataSelect {
	sms.fns = append(sms.fns, fns...)
	return sms
}

// Scan applies the selector query and scans the result into the given value.
func (sms *ServerMetadataSelect) Scan(ctx context.Context, v any) error {
	ctx = setContextOp(ctx, sms.ctx, "Select")
	if err := sms.prepareQuery(ctx); err != nil {
		return err
	}
	return scanWithInterceptors[*ServerMetadataQuery, *ServerMetadataSelect](ctx, sms.ServerMetadataQuery, sms, sms.inters, v)
}

// sqlScan executes the SELECT (optionally with aggregations appended) and
// scans the rows into v.
func (sms *ServerMetadataSelect) sqlScan(ctx context.Context, root *ServerMetadataQuery, v any) error {
	selector := root.sqlQuery(ctx)
	aggregation := make([]string, 0, len(sms.fns))
	for _, fn := range sms.fns {
		aggregation = append(aggregation, fn(selector))
	}
	switch n := len(*sms.selector.flds); {
	case n == 0 && len(aggregation) > 0:
		selector.Select(aggregation...)
	case n != 0 && len(aggregation) > 0:
		selector.AppendSelect(aggregation...)
	}
	rows := &sql.Rows{}
	query, args := selector.Query()
	if err := sms.driver.Query(ctx, query, args, rows); err != nil {
		return err
	}
	defer rows.Close()
	return sql.ScanSlice(rows, v)
}
diff --git a/ent/servermetadata_update.go b/ent/servermetadata_update.go
new file mode 100644
index 0000000..44c76fd
--- /dev/null
+++ b/ent/servermetadata_update.go
@@ -0,0 +1,704 @@
// Code generated by ent, DO NOT EDIT.

package ent

import (
	"context"
	"errors"
	"fmt"
	"time"

	"entgo.io/ent/dialect/sql"
	"entgo.io/ent/dialect/sql/sqlgraph"
	"entgo.io/ent/dialect/sql/sqljson"
	"entgo.io/ent/schema/field"
	"github.com/google/uuid"
	"github.com/lysand-org/versia-go/ent/predicate"
	"github.com/lysand-org/versia-go/ent/servermetadata"
	"github.com/lysand-org/versia-go/ent/user"
	"github.com/lysand-org/versia-go/pkg/lysand"
)

// ServerMetadataUpdate is the builder for updating ServerMetadata entities.
type ServerMetadataUpdate struct {
	config
	hooks    []Hook
	mutation *ServerMetadataMutation
}

// Where appends a list predicates to the ServerMetadataUpdate builder.
func (smu *ServerMetadataUpdate) Where(ps ...predicate.ServerMetadata) *ServerMetadataUpdate {
	smu.mutation.Where(ps...)
	return smu
}

// SetIsRemote sets the "isRemote" field.
func (smu *ServerMetadataUpdate) SetIsRemote(b bool) *ServerMetadataUpdate {
	smu.mutation.SetIsRemote(b)
	return smu
}

// SetNillableIsRemote sets the "isRemote" field if the given value is not nil.
func (smu *ServerMetadataUpdate) SetNillableIsRemote(b *bool) *ServerMetadataUpdate {
	if b != nil {
		smu.SetIsRemote(*b)
	}
	return smu
}

// SetURI sets the "uri" field.
func (smu *ServerMetadataUpdate) SetURI(s string) *ServerMetadataUpdate {
	smu.mutation.SetURI(s)
	return smu
}

// SetNillableURI sets the "uri" field if the given value is not nil.
func (smu *ServerMetadataUpdate) SetNillableURI(s *string) *ServerMetadataUpdate {
	if s != nil {
		smu.SetURI(*s)
	}
	return smu
}

// SetExtensions sets the "extensions" field.
func (smu *ServerMetadataUpdate) SetExtensions(l lysand.Extensions) *ServerMetadataUpdate {
	smu.mutation.SetExtensions(l)
	return smu
}

// SetUpdatedAt sets the "updated_at" field.
func (smu *ServerMetadataUpdate) SetUpdatedAt(t time.Time) *ServerMetadataUpdate {
	smu.mutation.SetUpdatedAt(t)
	return smu
}

// SetName sets the "name" field.
func (smu *ServerMetadataUpdate) SetName(s string) *ServerMetadataUpdate {
	smu.mutation.SetName(s)
	return smu
}

// SetNillableName sets the "name" field if the given value is not nil.
func (smu *ServerMetadataUpdate) SetNillableName(s *string) *ServerMetadataUpdate {
	if s != nil {
		smu.SetName(*s)
	}
	return smu
}

// SetDescription sets the "description" field.
func (smu *ServerMetadataUpdate) SetDescription(s string) *ServerMetadataUpdate {
	smu.mutation.SetDescription(s)
	return smu
}

// SetNillableDescription sets the "description" field if the given value is not nil.
func (smu *ServerMetadataUpdate) SetNillableDescription(s *string) *ServerMetadataUpdate {
	if s != nil {
		smu.SetDescription(*s)
	}
	return smu
}

// ClearDescription clears the value of the "description" field.
func (smu *ServerMetadataUpdate) ClearDescription() *ServerMetadataUpdate {
	smu.mutation.ClearDescription()
	return smu
}

// SetVersion sets the "version" field.
func (smu *ServerMetadataUpdate) SetVersion(s string) *ServerMetadataUpdate {
	smu.mutation.SetVersion(s)
	return smu
}

// SetNillableVersion sets the "version" field if the given value is not nil.
func (smu *ServerMetadataUpdate) SetNillableVersion(s *string) *ServerMetadataUpdate {
	if s != nil {
		smu.SetVersion(*s)
	}
	return smu
}

// SetSupportedExtensions sets the "supportedExtensions" field.
func (smu *ServerMetadataUpdate) SetSupportedExtensions(s []string) *ServerMetadataUpdate {
	smu.mutation.SetSupportedExtensions(s)
	return smu
}

// AppendSupportedExtensions appends s to the "supportedExtensions" field.
func (smu *ServerMetadataUpdate) AppendSupportedExtensions(s []string) *ServerMetadataUpdate {
	smu.mutation.AppendSupportedExtensions(s)
	return smu
}

// SetFollowerID sets the "follower" edge to the User entity by ID.
func (smu *ServerMetadataUpdate) SetFollowerID(id uuid.UUID) *ServerMetadataUpdate {
	smu.mutation.SetFollowerID(id)
	return smu
}

// SetFollower sets the "follower" edge to the User entity.
func (smu *ServerMetadataUpdate) SetFollower(u *User) *ServerMetadataUpdate {
	return smu.SetFollowerID(u.ID)
}

// SetFolloweeID sets the "followee" edge to the User entity by ID.
func (smu *ServerMetadataUpdate) SetFolloweeID(id uuid.UUID) *ServerMetadataUpdate {
	smu.mutation.SetFolloweeID(id)
	return smu
}

// SetFollowee sets the "followee" edge to the User entity.
func (smu *ServerMetadataUpdate) SetFollowee(u *User) *ServerMetadataUpdate {
	return smu.SetFolloweeID(u.ID)
}

// Mutation returns the ServerMetadataMutation object of the builder.
func (smu *ServerMetadataUpdate) Mutation() *ServerMetadataMutation {
	return smu.mutation
}

// ClearFollower clears the "follower" edge to the User entity.
func (smu *ServerMetadataUpdate) ClearFollower() *ServerMetadataUpdate {
	smu.mutation.ClearFollower()
	return smu
}

// ClearFollowee clears the "followee" edge to the User entity.
func (smu *ServerMetadataUpdate) ClearFollowee() *ServerMetadataUpdate {
	smu.mutation.ClearFollowee()
	return smu
}

// Save executes the query and returns the number of nodes affected by the update operation.
func (smu *ServerMetadataUpdate) Save(ctx context.Context) (int, error) {
	smu.defaults()
	return withHooks(ctx, smu.sqlSave, smu.mutation, smu.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (smu *ServerMetadataUpdate) SaveX(ctx context.Context) int {
	affected, err := smu.Save(ctx)
	if err != nil {
		panic(err)
	}
	return affected
}

// Exec executes the query.
func (smu *ServerMetadataUpdate) Exec(ctx context.Context) error {
	_, err := smu.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (smu *ServerMetadataUpdate) ExecX(ctx context.Context) {
	if err := smu.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
func (smu *ServerMetadataUpdate) defaults() {
	// Auto-touch updated_at when the caller did not set it explicitly.
	if _, ok := smu.mutation.UpdatedAt(); !ok {
		v := servermetadata.UpdateDefaultUpdatedAt()
		smu.mutation.SetUpdatedAt(v)
	}
}

// check runs all checks and user-defined validators on the builder.
func (smu *ServerMetadataUpdate) check() error {
	if v, ok := smu.mutation.URI(); ok {
		if err := servermetadata.URIValidator(v); err != nil {
			return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "ServerMetadata.uri": %w`, err)}
		}
	}
	if v, ok := smu.mutation.Name(); ok {
		if err := servermetadata.NameValidator(v); err != nil {
			return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ServerMetadata.name": %w`, err)}
		}
	}
	if v, ok := smu.mutation.Version(); ok {
		if err := servermetadata.VersionValidator(v); err != nil {
			return &ValidationError{Name: "version", err: fmt.Errorf(`ent: validator failed for field "ServerMetadata.version": %w`, err)}
		}
	}
	// The follower/followee edges are required and unique: they may only be
	// cleared if a replacement ID is set in the same mutation.
	if _, ok := smu.mutation.FollowerID(); smu.mutation.FollowerCleared() && !ok {
		return errors.New(`ent: clearing a required unique edge "ServerMetadata.follower"`)
	}
	if _, ok := smu.mutation.FolloweeID(); smu.mutation.FolloweeCleared() && !ok {
		return errors.New(`ent: clearing a required unique edge "ServerMetadata.followee"`)
	}
	return nil
}

// sqlSave validates the mutation and translates it into an UPDATE spec
// (field sets/clears plus edge clear/add specs), then executes it.
func (smu *ServerMetadataUpdate) sqlSave(ctx context.Context) (n int, err error) {
	if err := smu.check(); err != nil {
		return n, err
	}
	_spec := sqlgraph.NewUpdateSpec(servermetadata.Table, servermetadata.Columns, sqlgraph.NewFieldSpec(servermetadata.FieldID, field.TypeUUID))
	if ps := smu.mutation.predicates; len(ps) > 0 {
		_spec.Predicate = func(selector *sql.Selector) {
			for i := range ps {
				ps[i](selector)
			}
		}
	}
	if value, ok := smu.mutation.IsRemote(); ok {
		_spec.SetField(servermetadata.FieldIsRemote, field.TypeBool, value)
	}
	if value, ok := smu.mutation.URI(); ok {
		_spec.SetField(servermetadata.FieldURI, field.TypeString, value)
	}
	if value, ok := smu.mutation.Extensions(); ok {
		_spec.SetField(servermetadata.FieldExtensions, field.TypeJSON, value)
	}
	if value, ok := smu.mutation.UpdatedAt(); ok {
		_spec.SetField(servermetadata.FieldUpdatedAt, field.TypeTime, value)
	}
	if value, ok := smu.mutation.Name(); ok {
		_spec.SetField(servermetadata.FieldName, field.TypeString, value)
	}
	if value, ok := smu.mutation.Description(); ok {
		_spec.SetField(servermetadata.FieldDescription, field.TypeString, value)
	}
	if smu.mutation.DescriptionCleared() {
		_spec.ClearField(servermetadata.FieldDescription, field.TypeString)
	}
	if value, ok := smu.mutation.Version(); ok {
		_spec.SetField(servermetadata.FieldVersion, field.TypeString, value)
	}
	if value, ok := smu.mutation.SupportedExtensions(); ok {
		_spec.SetField(servermetadata.FieldSupportedExtensions, field.TypeJSON, value)
	}
	if value, ok := smu.mutation.AppendedSupportedExtensions(); ok {
		// Appends use a JSON array-append modifier instead of a full rewrite.
		_spec.AddModifier(func(u *sql.UpdateBuilder) {
			sqljson.Append(u, servermetadata.FieldSupportedExtensions, value)
		})
	}
	if smu.mutation.FollowerCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: false,
			Table:   servermetadata.FollowerTable,
			Columns: []string{servermetadata.FollowerColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := smu.mutation.FollowerIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: false,
			Table:   servermetadata.FollowerTable,
			Columns: []string{servermetadata.FollowerColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if smu.mutation.FolloweeCleared() {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: false,
			Table:   servermetadata.FolloweeTable,
			Columns: []string{servermetadata.FolloweeColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
			},
		}
		_spec.Edges.Clear = append(_spec.Edges.Clear, edge)
	}
	if nodes := smu.mutation.FolloweeIDs(); len(nodes) > 0 {
		edge := &sqlgraph.EdgeSpec{
			Rel:     sqlgraph.M2O,
			Inverse: false,
			Table:   servermetadata.FolloweeTable,
			Columns: []string{servermetadata.FolloweeColumn},
			Bidi:    false,
			Target: &sqlgraph.EdgeTarget{
				IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID),
			},
		}
		for _, k := range nodes {
			edge.Target.Nodes = append(edge.Target.Nodes, k)
		}
		_spec.Edges.Add = append(_spec.Edges.Add, edge)
	}
	if n, err = sqlgraph.UpdateNodes(ctx, smu.driver, _spec); err != nil {
		if _, ok := err.(*sqlgraph.NotFoundError); ok {
			err = &NotFoundError{servermetadata.Label}
		} else if sqlgraph.IsConstraintError(err) {
			err = &ConstraintError{msg: err.Error(), wrap: err}
		}
		return 0, err
	}
	smu.mutation.done = true
	return n, nil
}

// ServerMetadataUpdateOne is the builder for updating a single ServerMetadata entity.
type ServerMetadataUpdateOne struct {
	config
	fields   []string
	hooks    []Hook
	mutation *ServerMetadataMutation
}

// SetIsRemote sets the "isRemote" field.
func (smuo *ServerMetadataUpdateOne) SetIsRemote(b bool) *ServerMetadataUpdateOne {
	smuo.mutation.SetIsRemote(b)
	return smuo
}

// SetNillableIsRemote sets the "isRemote" field if the given value is not nil.
func (smuo *ServerMetadataUpdateOne) SetNillableIsRemote(b *bool) *ServerMetadataUpdateOne {
	if b != nil {
		smuo.SetIsRemote(*b)
	}
	return smuo
}

// SetURI sets the "uri" field.
func (smuo *ServerMetadataUpdateOne) SetURI(s string) *ServerMetadataUpdateOne {
	smuo.mutation.SetURI(s)
	return smuo
}

// SetNillableURI sets the "uri" field if the given value is not nil.
func (smuo *ServerMetadataUpdateOne) SetNillableURI(s *string) *ServerMetadataUpdateOne {
	if s != nil {
		smuo.SetURI(*s)
	}
	return smuo
}

// SetExtensions sets the "extensions" field.
func (smuo *ServerMetadataUpdateOne) SetExtensions(l lysand.Extensions) *ServerMetadataUpdateOne {
	smuo.mutation.SetExtensions(l)
	return smuo
}

// SetUpdatedAt sets the "updated_at" field.
func (smuo *ServerMetadataUpdateOne) SetUpdatedAt(t time.Time) *ServerMetadataUpdateOne {
	smuo.mutation.SetUpdatedAt(t)
	return smuo
}

// SetName sets the "name" field.
func (smuo *ServerMetadataUpdateOne) SetName(s string) *ServerMetadataUpdateOne {
	smuo.mutation.SetName(s)
	return smuo
}

// SetNillableName sets the "name" field if the given value is not nil.
func (smuo *ServerMetadataUpdateOne) SetNillableName(s *string) *ServerMetadataUpdateOne {
	if s != nil {
		smuo.SetName(*s)
	}
	return smuo
}

// SetDescription sets the "description" field.
func (smuo *ServerMetadataUpdateOne) SetDescription(s string) *ServerMetadataUpdateOne {
	smuo.mutation.SetDescription(s)
	return smuo
}

// SetNillableDescription sets the "description" field if the given value is not nil.
func (smuo *ServerMetadataUpdateOne) SetNillableDescription(s *string) *ServerMetadataUpdateOne {
	if s != nil {
		smuo.SetDescription(*s)
	}
	return smuo
}

// ClearDescription clears the value of the "description" field.
func (smuo *ServerMetadataUpdateOne) ClearDescription() *ServerMetadataUpdateOne {
	smuo.mutation.ClearDescription()
	return smuo
}

// SetVersion sets the "version" field.
func (smuo *ServerMetadataUpdateOne) SetVersion(s string) *ServerMetadataUpdateOne {
	smuo.mutation.SetVersion(s)
	return smuo
}

// SetNillableVersion sets the "version" field if the given value is not nil.
func (smuo *ServerMetadataUpdateOne) SetNillableVersion(s *string) *ServerMetadataUpdateOne {
	if s != nil {
		smuo.SetVersion(*s)
	}
	return smuo
}

// SetSupportedExtensions sets the "supportedExtensions" field.
func (smuo *ServerMetadataUpdateOne) SetSupportedExtensions(s []string) *ServerMetadataUpdateOne {
	smuo.mutation.SetSupportedExtensions(s)
	return smuo
}

// AppendSupportedExtensions appends s to the "supportedExtensions" field.
func (smuo *ServerMetadataUpdateOne) AppendSupportedExtensions(s []string) *ServerMetadataUpdateOne {
	smuo.mutation.AppendSupportedExtensions(s)
	return smuo
}

// SetFollowerID sets the "follower" edge to the User entity by ID.
func (smuo *ServerMetadataUpdateOne) SetFollowerID(id uuid.UUID) *ServerMetadataUpdateOne {
	smuo.mutation.SetFollowerID(id)
	return smuo
}

// SetFollower sets the "follower" edge to the User entity.
func (smuo *ServerMetadataUpdateOne) SetFollower(u *User) *ServerMetadataUpdateOne {
	return smuo.SetFollowerID(u.ID)
}

// SetFolloweeID sets the "followee" edge to the User entity by ID.
func (smuo *ServerMetadataUpdateOne) SetFolloweeID(id uuid.UUID) *ServerMetadataUpdateOne {
	smuo.mutation.SetFolloweeID(id)
	return smuo
}

// SetFollowee sets the "followee" edge to the User entity.
func (smuo *ServerMetadataUpdateOne) SetFollowee(u *User) *ServerMetadataUpdateOne {
	return smuo.SetFolloweeID(u.ID)
}

// Mutation returns the ServerMetadataMutation object of the builder.
func (smuo *ServerMetadataUpdateOne) Mutation() *ServerMetadataMutation {
	return smuo.mutation
}

// ClearFollower clears the "follower" edge to the User entity.
func (smuo *ServerMetadataUpdateOne) ClearFollower() *ServerMetadataUpdateOne {
	smuo.mutation.ClearFollower()
	return smuo
}

// ClearFollowee clears the "followee" edge to the User entity.
func (smuo *ServerMetadataUpdateOne) ClearFollowee() *ServerMetadataUpdateOne {
	smuo.mutation.ClearFollowee()
	return smuo
}

// Where appends a list predicates to the ServerMetadataUpdate builder.
func (smuo *ServerMetadataUpdateOne) Where(ps ...predicate.ServerMetadata) *ServerMetadataUpdateOne {
	smuo.mutation.Where(ps...)
	return smuo
}

// Select allows selecting one or more fields (columns) of the returned entity.
// The default is selecting all fields defined in the entity schema.
func (smuo *ServerMetadataUpdateOne) Select(field string, fields ...string) *ServerMetadataUpdateOne {
	smuo.fields = append([]string{field}, fields...)
	return smuo
}

// Save executes the query and returns the updated ServerMetadata entity.
func (smuo *ServerMetadataUpdateOne) Save(ctx context.Context) (*ServerMetadata, error) {
	smuo.defaults()
	return withHooks(ctx, smuo.sqlSave, smuo.mutation, smuo.hooks)
}

// SaveX is like Save, but panics if an error occurs.
func (smuo *ServerMetadataUpdateOne) SaveX(ctx context.Context) *ServerMetadata {
	node, err := smuo.Save(ctx)
	if err != nil {
		panic(err)
	}
	return node
}

// Exec executes the query on the entity.
func (smuo *ServerMetadataUpdateOne) Exec(ctx context.Context) error {
	_, err := smuo.Save(ctx)
	return err
}

// ExecX is like Exec, but panics if an error occurs.
func (smuo *ServerMetadataUpdateOne) ExecX(ctx context.Context) {
	if err := smuo.Exec(ctx); err != nil {
		panic(err)
	}
}

// defaults sets the default values of the builder before save.
func (smuo *ServerMetadataUpdateOne) defaults() {
	// Auto-touch updated_at when the caller did not set it explicitly.
	if _, ok := smuo.mutation.UpdatedAt(); !ok {
		v := servermetadata.UpdateDefaultUpdatedAt()
		smuo.mutation.SetUpdatedAt(v)
	}
}

// check runs all checks and user-defined validators on the builder.
+func (smuo *ServerMetadataUpdateOne) check() error { + if v, ok := smuo.mutation.URI(); ok { + if err := servermetadata.URIValidator(v); err != nil { + return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "ServerMetadata.uri": %w`, err)} + } + } + if v, ok := smuo.mutation.Name(); ok { + if err := servermetadata.NameValidator(v); err != nil { + return &ValidationError{Name: "name", err: fmt.Errorf(`ent: validator failed for field "ServerMetadata.name": %w`, err)} + } + } + if v, ok := smuo.mutation.Version(); ok { + if err := servermetadata.VersionValidator(v); err != nil { + return &ValidationError{Name: "version", err: fmt.Errorf(`ent: validator failed for field "ServerMetadata.version": %w`, err)} + } + } + if _, ok := smuo.mutation.FollowerID(); smuo.mutation.FollowerCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "ServerMetadata.follower"`) + } + if _, ok := smuo.mutation.FolloweeID(); smuo.mutation.FolloweeCleared() && !ok { + return errors.New(`ent: clearing a required unique edge "ServerMetadata.followee"`) + } + return nil +} + +func (smuo *ServerMetadataUpdateOne) sqlSave(ctx context.Context) (_node *ServerMetadata, err error) { + if err := smuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(servermetadata.Table, servermetadata.Columns, sqlgraph.NewFieldSpec(servermetadata.FieldID, field.TypeUUID)) + id, ok := smuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "ServerMetadata.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := smuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, servermetadata.FieldID) + for _, f := range fields { + if !servermetadata.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != servermetadata.FieldID { + 
_spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := smuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := smuo.mutation.IsRemote(); ok { + _spec.SetField(servermetadata.FieldIsRemote, field.TypeBool, value) + } + if value, ok := smuo.mutation.URI(); ok { + _spec.SetField(servermetadata.FieldURI, field.TypeString, value) + } + if value, ok := smuo.mutation.Extensions(); ok { + _spec.SetField(servermetadata.FieldExtensions, field.TypeJSON, value) + } + if value, ok := smuo.mutation.UpdatedAt(); ok { + _spec.SetField(servermetadata.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := smuo.mutation.Name(); ok { + _spec.SetField(servermetadata.FieldName, field.TypeString, value) + } + if value, ok := smuo.mutation.Description(); ok { + _spec.SetField(servermetadata.FieldDescription, field.TypeString, value) + } + if smuo.mutation.DescriptionCleared() { + _spec.ClearField(servermetadata.FieldDescription, field.TypeString) + } + if value, ok := smuo.mutation.Version(); ok { + _spec.SetField(servermetadata.FieldVersion, field.TypeString, value) + } + if value, ok := smuo.mutation.SupportedExtensions(); ok { + _spec.SetField(servermetadata.FieldSupportedExtensions, field.TypeJSON, value) + } + if value, ok := smuo.mutation.AppendedSupportedExtensions(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, servermetadata.FieldSupportedExtensions, value) + }) + } + if smuo.mutation.FollowerCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: servermetadata.FollowerTable, + Columns: []string{servermetadata.FollowerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := smuo.mutation.FollowerIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ 
+ Rel: sqlgraph.M2O, + Inverse: false, + Table: servermetadata.FollowerTable, + Columns: []string{servermetadata.FollowerColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if smuo.mutation.FolloweeCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: servermetadata.FolloweeTable, + Columns: []string{servermetadata.FolloweeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := smuo.mutation.FolloweeIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: servermetadata.FolloweeTable, + Columns: []string{servermetadata.FolloweeColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &ServerMetadata{config: smuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, smuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{servermetadata.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + smuo.mutation.done = true + return _node, nil +} diff --git a/ent/tx.go b/ent/tx.go new file mode 100644 index 0000000..1642262 --- /dev/null +++ b/ent/tx.go @@ -0,0 +1,225 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "sync" + + "entgo.io/ent/dialect" +) + +// Tx is a transactional client that is created by calling Client.Tx(). +type Tx struct { + config + // Attachment is the client for interacting with the Attachment builders. + Attachment *AttachmentClient + // Follow is the client for interacting with the Follow builders. + Follow *FollowClient + // Image is the client for interacting with the Image builders. + Image *ImageClient + // Note is the client for interacting with the Note builders. + Note *NoteClient + // ServerMetadata is the client for interacting with the ServerMetadata builders. + ServerMetadata *ServerMetadataClient + // User is the client for interacting with the User builders. + User *UserClient + + // lazily loaded. + client *Client + clientOnce sync.Once + // ctx lives for the life of the transaction. It is + // the same context used by the underlying connection. + ctx context.Context +} + +type ( + // Committer is the interface that wraps the Commit method. + Committer interface { + Commit(context.Context, *Tx) error + } + + // The CommitFunc type is an adapter to allow the use of ordinary + // function as a Committer. If f is a function with the appropriate + // signature, CommitFunc(f) is a Committer that calls f. + CommitFunc func(context.Context, *Tx) error + + // CommitHook defines the "commit middleware". A function that gets a Committer + // and returns a Committer. For example: + // + // hook := func(next ent.Committer) ent.Committer { + // return ent.CommitFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Commit(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + CommitHook func(Committer) Committer +) + +// Commit calls f(ctx, m). +func (f CommitFunc) Commit(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Commit commits the transaction. 
+func (tx *Tx) Commit() error { + txDriver := tx.config.driver.(*txDriver) + var fn Committer = CommitFunc(func(context.Context, *Tx) error { + return txDriver.tx.Commit() + }) + txDriver.mu.Lock() + hooks := append([]CommitHook(nil), txDriver.onCommit...) + txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Commit(tx.ctx, tx) +} + +// OnCommit adds a hook to call on commit. +func (tx *Tx) OnCommit(f CommitHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onCommit = append(txDriver.onCommit, f) + txDriver.mu.Unlock() +} + +type ( + // Rollbacker is the interface that wraps the Rollback method. + Rollbacker interface { + Rollback(context.Context, *Tx) error + } + + // The RollbackFunc type is an adapter to allow the use of ordinary + // function as a Rollbacker. If f is a function with the appropriate + // signature, RollbackFunc(f) is a Rollbacker that calls f. + RollbackFunc func(context.Context, *Tx) error + + // RollbackHook defines the "rollback middleware". A function that gets a Rollbacker + // and returns a Rollbacker. For example: + // + // hook := func(next ent.Rollbacker) ent.Rollbacker { + // return ent.RollbackFunc(func(ctx context.Context, tx *ent.Tx) error { + // // Do some stuff before. + // if err := next.Rollback(ctx, tx); err != nil { + // return err + // } + // // Do some stuff after. + // return nil + // }) + // } + // + RollbackHook func(Rollbacker) Rollbacker +) + +// Rollback calls f(ctx, m). +func (f RollbackFunc) Rollback(ctx context.Context, tx *Tx) error { + return f(ctx, tx) +} + +// Rollback rollbacks the transaction. +func (tx *Tx) Rollback() error { + txDriver := tx.config.driver.(*txDriver) + var fn Rollbacker = RollbackFunc(func(context.Context, *Tx) error { + return txDriver.tx.Rollback() + }) + txDriver.mu.Lock() + hooks := append([]RollbackHook(nil), txDriver.onRollback...) 
+ txDriver.mu.Unlock() + for i := len(hooks) - 1; i >= 0; i-- { + fn = hooks[i](fn) + } + return fn.Rollback(tx.ctx, tx) +} + +// OnRollback adds a hook to call on rollback. +func (tx *Tx) OnRollback(f RollbackHook) { + txDriver := tx.config.driver.(*txDriver) + txDriver.mu.Lock() + txDriver.onRollback = append(txDriver.onRollback, f) + txDriver.mu.Unlock() +} + +// Client returns a Client that binds to current transaction. +func (tx *Tx) Client() *Client { + tx.clientOnce.Do(func() { + tx.client = &Client{config: tx.config} + tx.client.init() + }) + return tx.client +} + +func (tx *Tx) init() { + tx.Attachment = NewAttachmentClient(tx.config) + tx.Follow = NewFollowClient(tx.config) + tx.Image = NewImageClient(tx.config) + tx.Note = NewNoteClient(tx.config) + tx.ServerMetadata = NewServerMetadataClient(tx.config) + tx.User = NewUserClient(tx.config) +} + +// txDriver wraps the given dialect.Tx with a nop dialect.Driver implementation. +// The idea is to support transactions without adding any extra code to the builders. +// When a builder calls to driver.Tx(), it gets the same dialect.Tx instance. +// Commit and Rollback are nop for the internal builders and the user must call one +// of them in order to commit or rollback the transaction. +// +// If a closed transaction is embedded in one of the generated entities, and the entity +// applies a query, for example: Attachment.QueryXXX(), the query will be executed +// through the driver which created this transaction. +// +// Note that txDriver is not goroutine safe. +type txDriver struct { + // the driver we started the transaction from. + drv dialect.Driver + // tx is the underlying transaction. + tx dialect.Tx + // completion hooks. + mu sync.Mutex + onCommit []CommitHook + onRollback []RollbackHook +} + +// newTx creates a new transactional driver. 
+func newTx(ctx context.Context, drv dialect.Driver) (*txDriver, error) { + tx, err := drv.Tx(ctx) + if err != nil { + return nil, err + } + return &txDriver{tx: tx, drv: drv}, nil +} + +// Tx returns the transaction wrapper (txDriver) to avoid Commit or Rollback calls +// from the internal builders. Should be called only by the internal builders. +func (tx *txDriver) Tx(context.Context) (dialect.Tx, error) { return tx, nil } + +// Dialect returns the dialect of the driver we started the transaction from. +func (tx *txDriver) Dialect() string { return tx.drv.Dialect() } + +// Close is a nop close. +func (*txDriver) Close() error { return nil } + +// Commit is a nop commit for the internal builders. +// User must call `Tx.Commit` in order to commit the transaction. +func (*txDriver) Commit() error { return nil } + +// Rollback is a nop rollback for the internal builders. +// User must call `Tx.Rollback` in order to rollback the transaction. +func (*txDriver) Rollback() error { return nil } + +// Exec calls tx.Exec. +func (tx *txDriver) Exec(ctx context.Context, query string, args, v any) error { + return tx.tx.Exec(ctx, query, args, v) +} + +// Query calls tx.Query. +func (tx *txDriver) Query(ctx context.Context, query string, args, v any) error { + return tx.tx.Query(ctx, query, args, v) +} + +var _ dialect.Driver = (*txDriver)(nil) diff --git a/ent/user.go b/ent/user.go new file mode 100644 index 0000000..c550964 --- /dev/null +++ b/ent/user.go @@ -0,0 +1,423 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "crypto/ed25519" + "encoding/json" + "fmt" + "strings" + "time" + + "entgo.io/ent" + "entgo.io/ent/dialect/sql" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/image" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// User is the model entity for the User schema. +type User struct { + config `json:"-"` + // ID of the ent. 
+ ID uuid.UUID `json:"id,omitempty"` + // IsRemote holds the value of the "isRemote" field. + IsRemote bool `json:"isRemote,omitempty"` + // URI holds the value of the "uri" field. + URI string `json:"uri,omitempty"` + // Extensions holds the value of the "extensions" field. + Extensions lysand.Extensions `json:"extensions,omitempty"` + // CreatedAt holds the value of the "created_at" field. + CreatedAt time.Time `json:"created_at,omitempty"` + // UpdatedAt holds the value of the "updated_at" field. + UpdatedAt time.Time `json:"updated_at,omitempty"` + // Username holds the value of the "username" field. + Username string `json:"username,omitempty"` + // PasswordHash holds the value of the "passwordHash" field. + PasswordHash *[]byte `json:"passwordHash,omitempty"` + // DisplayName holds the value of the "displayName" field. + DisplayName *string `json:"displayName,omitempty"` + // Biography holds the value of the "biography" field. + Biography *string `json:"biography,omitempty"` + // PublicKey holds the value of the "publicKey" field. + PublicKey ed25519.PublicKey `json:"publicKey,omitempty"` + // PrivateKey holds the value of the "privateKey" field. + PrivateKey ed25519.PrivateKey `json:"privateKey,omitempty"` + // Indexable holds the value of the "indexable" field. + Indexable bool `json:"indexable,omitempty"` + // PrivacyLevel holds the value of the "privacyLevel" field. + PrivacyLevel user.PrivacyLevel `json:"privacyLevel,omitempty"` + // Fields holds the value of the "fields" field. + Fields []lysand.Field `json:"fields,omitempty"` + // Inbox holds the value of the "inbox" field. + Inbox string `json:"inbox,omitempty"` + // Featured holds the value of the "featured" field. + Featured string `json:"featured,omitempty"` + // Followers holds the value of the "followers" field. + Followers string `json:"followers,omitempty"` + // Following holds the value of the "following" field. 
+ Following string `json:"following,omitempty"` + // Outbox holds the value of the "outbox" field. + Outbox string `json:"outbox,omitempty"` + // Edges holds the relations/edges for other nodes in the graph. + // The values are being populated by the UserQuery when eager-loading is set. + Edges UserEdges `json:"edges"` + user_avatar_image *int + user_header_image *int + selectValues sql.SelectValues +} + +// UserEdges holds the relations/edges for other nodes in the graph. +type UserEdges struct { + // AvatarImage holds the value of the avatarImage edge. + AvatarImage *Image `json:"avatarImage,omitempty"` + // HeaderImage holds the value of the headerImage edge. + HeaderImage *Image `json:"headerImage,omitempty"` + // AuthoredNotes holds the value of the authoredNotes edge. + AuthoredNotes []*Note `json:"authoredNotes,omitempty"` + // MentionedNotes holds the value of the mentionedNotes edge. + MentionedNotes []*Note `json:"mentionedNotes,omitempty"` + // loadedTypes holds the information for reporting if a + // type was loaded (or requested) in eager-loading or not. + loadedTypes [4]bool +} + +// AvatarImageOrErr returns the AvatarImage value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. +func (e UserEdges) AvatarImageOrErr() (*Image, error) { + if e.AvatarImage != nil { + return e.AvatarImage, nil + } else if e.loadedTypes[0] { + return nil, &NotFoundError{label: image.Label} + } + return nil, &NotLoadedError{edge: "avatarImage"} +} + +// HeaderImageOrErr returns the HeaderImage value or an error if the edge +// was not loaded in eager-loading, or loaded but was not found. 
+func (e UserEdges) HeaderImageOrErr() (*Image, error) { + if e.HeaderImage != nil { + return e.HeaderImage, nil + } else if e.loadedTypes[1] { + return nil, &NotFoundError{label: image.Label} + } + return nil, &NotLoadedError{edge: "headerImage"} +} + +// AuthoredNotesOrErr returns the AuthoredNotes value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) AuthoredNotesOrErr() ([]*Note, error) { + if e.loadedTypes[2] { + return e.AuthoredNotes, nil + } + return nil, &NotLoadedError{edge: "authoredNotes"} +} + +// MentionedNotesOrErr returns the MentionedNotes value or an error if the edge +// was not loaded in eager-loading. +func (e UserEdges) MentionedNotesOrErr() ([]*Note, error) { + if e.loadedTypes[3] { + return e.MentionedNotes, nil + } + return nil, &NotLoadedError{edge: "mentionedNotes"} +} + +// scanValues returns the types for scanning values from sql.Rows. +func (*User) scanValues(columns []string) ([]any, error) { + values := make([]any, len(columns)) + for i := range columns { + switch columns[i] { + case user.FieldExtensions, user.FieldPasswordHash, user.FieldPublicKey, user.FieldPrivateKey, user.FieldFields: + values[i] = new([]byte) + case user.FieldIsRemote, user.FieldIndexable: + values[i] = new(sql.NullBool) + case user.FieldURI, user.FieldUsername, user.FieldDisplayName, user.FieldBiography, user.FieldPrivacyLevel, user.FieldInbox, user.FieldFeatured, user.FieldFollowers, user.FieldFollowing, user.FieldOutbox: + values[i] = new(sql.NullString) + case user.FieldCreatedAt, user.FieldUpdatedAt: + values[i] = new(sql.NullTime) + case user.FieldID: + values[i] = new(uuid.UUID) + case user.ForeignKeys[0]: // user_avatar_image + values[i] = new(sql.NullInt64) + case user.ForeignKeys[1]: // user_header_image + values[i] = new(sql.NullInt64) + default: + values[i] = new(sql.UnknownType) + } + } + return values, nil +} + +// assignValues assigns the values that were returned from sql.Rows (after scanning) +// to the User 
fields. +func (u *User) assignValues(columns []string, values []any) error { + if m, n := len(values), len(columns); m < n { + return fmt.Errorf("mismatch number of scan values: %d != %d", m, n) + } + for i := range columns { + switch columns[i] { + case user.FieldID: + if value, ok := values[i].(*uuid.UUID); !ok { + return fmt.Errorf("unexpected type %T for field id", values[i]) + } else if value != nil { + u.ID = *value + } + case user.FieldIsRemote: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field isRemote", values[i]) + } else if value.Valid { + u.IsRemote = value.Bool + } + case user.FieldURI: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field uri", values[i]) + } else if value.Valid { + u.URI = value.String + } + case user.FieldExtensions: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field extensions", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &u.Extensions); err != nil { + return fmt.Errorf("unmarshal field extensions: %w", err) + } + } + case user.FieldCreatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field created_at", values[i]) + } else if value.Valid { + u.CreatedAt = value.Time + } + case user.FieldUpdatedAt: + if value, ok := values[i].(*sql.NullTime); !ok { + return fmt.Errorf("unexpected type %T for field updated_at", values[i]) + } else if value.Valid { + u.UpdatedAt = value.Time + } + case user.FieldUsername: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field username", values[i]) + } else if value.Valid { + u.Username = value.String + } + case user.FieldPasswordHash: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field passwordHash", values[i]) + } else if value != nil { + u.PasswordHash = value + } + 
case user.FieldDisplayName: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field displayName", values[i]) + } else if value.Valid { + u.DisplayName = new(string) + *u.DisplayName = value.String + } + case user.FieldBiography: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field biography", values[i]) + } else if value.Valid { + u.Biography = new(string) + *u.Biography = value.String + } + case user.FieldPublicKey: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field publicKey", values[i]) + } else if value != nil { + u.PublicKey = *value + } + case user.FieldPrivateKey: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field privateKey", values[i]) + } else if value != nil { + u.PrivateKey = *value + } + case user.FieldIndexable: + if value, ok := values[i].(*sql.NullBool); !ok { + return fmt.Errorf("unexpected type %T for field indexable", values[i]) + } else if value.Valid { + u.Indexable = value.Bool + } + case user.FieldPrivacyLevel: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field privacyLevel", values[i]) + } else if value.Valid { + u.PrivacyLevel = user.PrivacyLevel(value.String) + } + case user.FieldFields: + if value, ok := values[i].(*[]byte); !ok { + return fmt.Errorf("unexpected type %T for field fields", values[i]) + } else if value != nil && len(*value) > 0 { + if err := json.Unmarshal(*value, &u.Fields); err != nil { + return fmt.Errorf("unmarshal field fields: %w", err) + } + } + case user.FieldInbox: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field inbox", values[i]) + } else if value.Valid { + u.Inbox = value.String + } + case user.FieldFeatured: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field featured", 
values[i]) + } else if value.Valid { + u.Featured = value.String + } + case user.FieldFollowers: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field followers", values[i]) + } else if value.Valid { + u.Followers = value.String + } + case user.FieldFollowing: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field following", values[i]) + } else if value.Valid { + u.Following = value.String + } + case user.FieldOutbox: + if value, ok := values[i].(*sql.NullString); !ok { + return fmt.Errorf("unexpected type %T for field outbox", values[i]) + } else if value.Valid { + u.Outbox = value.String + } + case user.ForeignKeys[0]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field user_avatar_image", value) + } else if value.Valid { + u.user_avatar_image = new(int) + *u.user_avatar_image = int(value.Int64) + } + case user.ForeignKeys[1]: + if value, ok := values[i].(*sql.NullInt64); !ok { + return fmt.Errorf("unexpected type %T for edge-field user_header_image", value) + } else if value.Valid { + u.user_header_image = new(int) + *u.user_header_image = int(value.Int64) + } + default: + u.selectValues.Set(columns[i], values[i]) + } + } + return nil +} + +// Value returns the ent.Value that was dynamically selected and assigned to the User. +// This includes values selected through modifiers, order, etc. +func (u *User) Value(name string) (ent.Value, error) { + return u.selectValues.Get(name) +} + +// QueryAvatarImage queries the "avatarImage" edge of the User entity. +func (u *User) QueryAvatarImage() *ImageQuery { + return NewUserClient(u.config).QueryAvatarImage(u) +} + +// QueryHeaderImage queries the "headerImage" edge of the User entity. +func (u *User) QueryHeaderImage() *ImageQuery { + return NewUserClient(u.config).QueryHeaderImage(u) +} + +// QueryAuthoredNotes queries the "authoredNotes" edge of the User entity. 
+func (u *User) QueryAuthoredNotes() *NoteQuery { + return NewUserClient(u.config).QueryAuthoredNotes(u) +} + +// QueryMentionedNotes queries the "mentionedNotes" edge of the User entity. +func (u *User) QueryMentionedNotes() *NoteQuery { + return NewUserClient(u.config).QueryMentionedNotes(u) +} + +// Update returns a builder for updating this User. +// Note that you need to call User.Unwrap() before calling this method if this User +// was returned from a transaction, and the transaction was committed or rolled back. +func (u *User) Update() *UserUpdateOne { + return NewUserClient(u.config).UpdateOne(u) +} + +// Unwrap unwraps the User entity that was returned from a transaction after it was closed, +// so that all future queries will be executed through the driver which created the transaction. +func (u *User) Unwrap() *User { + _tx, ok := u.config.driver.(*txDriver) + if !ok { + panic("ent: User is not a transactional entity") + } + u.config.driver = _tx.drv + return u +} + +// String implements the fmt.Stringer. 
+func (u *User) String() string { + var builder strings.Builder + builder.WriteString("User(") + builder.WriteString(fmt.Sprintf("id=%v, ", u.ID)) + builder.WriteString("isRemote=") + builder.WriteString(fmt.Sprintf("%v", u.IsRemote)) + builder.WriteString(", ") + builder.WriteString("uri=") + builder.WriteString(u.URI) + builder.WriteString(", ") + builder.WriteString("extensions=") + builder.WriteString(fmt.Sprintf("%v", u.Extensions)) + builder.WriteString(", ") + builder.WriteString("created_at=") + builder.WriteString(u.CreatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("updated_at=") + builder.WriteString(u.UpdatedAt.Format(time.ANSIC)) + builder.WriteString(", ") + builder.WriteString("username=") + builder.WriteString(u.Username) + builder.WriteString(", ") + if v := u.PasswordHash; v != nil { + builder.WriteString("passwordHash=") + builder.WriteString(fmt.Sprintf("%v", *v)) + } + builder.WriteString(", ") + if v := u.DisplayName; v != nil { + builder.WriteString("displayName=") + builder.WriteString(*v) + } + builder.WriteString(", ") + if v := u.Biography; v != nil { + builder.WriteString("biography=") + builder.WriteString(*v) + } + builder.WriteString(", ") + builder.WriteString("publicKey=") + builder.WriteString(fmt.Sprintf("%v", u.PublicKey)) + builder.WriteString(", ") + builder.WriteString("privateKey=") + builder.WriteString(fmt.Sprintf("%v", u.PrivateKey)) + builder.WriteString(", ") + builder.WriteString("indexable=") + builder.WriteString(fmt.Sprintf("%v", u.Indexable)) + builder.WriteString(", ") + builder.WriteString("privacyLevel=") + builder.WriteString(fmt.Sprintf("%v", u.PrivacyLevel)) + builder.WriteString(", ") + builder.WriteString("fields=") + builder.WriteString(fmt.Sprintf("%v", u.Fields)) + builder.WriteString(", ") + builder.WriteString("inbox=") + builder.WriteString(u.Inbox) + builder.WriteString(", ") + builder.WriteString("featured=") + builder.WriteString(u.Featured) + builder.WriteString(", ") + 
builder.WriteString("followers=") + builder.WriteString(u.Followers) + builder.WriteString(", ") + builder.WriteString("following=") + builder.WriteString(u.Following) + builder.WriteString(", ") + builder.WriteString("outbox=") + builder.WriteString(u.Outbox) + builder.WriteByte(')') + return builder.String() +} + +// Users is a parsable slice of User. +type Users []*User diff --git a/ent/user/user.go b/ent/user/user.go new file mode 100644 index 0000000..fb0739e --- /dev/null +++ b/ent/user/user.go @@ -0,0 +1,354 @@ +// Code generated by ent, DO NOT EDIT. + +package user + +import ( + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +const ( + // Label holds the string label denoting the user type in the database. + Label = "user" + // FieldID holds the string denoting the id field in the database. + FieldID = "id" + // FieldIsRemote holds the string denoting the isremote field in the database. + FieldIsRemote = "is_remote" + // FieldURI holds the string denoting the uri field in the database. + FieldURI = "uri" + // FieldExtensions holds the string denoting the extensions field in the database. + FieldExtensions = "extensions" + // FieldCreatedAt holds the string denoting the created_at field in the database. + FieldCreatedAt = "created_at" + // FieldUpdatedAt holds the string denoting the updated_at field in the database. + FieldUpdatedAt = "updated_at" + // FieldUsername holds the string denoting the username field in the database. + FieldUsername = "username" + // FieldPasswordHash holds the string denoting the passwordhash field in the database. + FieldPasswordHash = "password_hash" + // FieldDisplayName holds the string denoting the displayname field in the database. + FieldDisplayName = "display_name" + // FieldBiography holds the string denoting the biography field in the database. 
+ FieldBiography = "biography" + // FieldPublicKey holds the string denoting the publickey field in the database. + FieldPublicKey = "public_key" + // FieldPrivateKey holds the string denoting the privatekey field in the database. + FieldPrivateKey = "private_key" + // FieldIndexable holds the string denoting the indexable field in the database. + FieldIndexable = "indexable" + // FieldPrivacyLevel holds the string denoting the privacylevel field in the database. + FieldPrivacyLevel = "privacy_level" + // FieldFields holds the string denoting the fields field in the database. + FieldFields = "fields" + // FieldInbox holds the string denoting the inbox field in the database. + FieldInbox = "inbox" + // FieldFeatured holds the string denoting the featured field in the database. + FieldFeatured = "featured" + // FieldFollowers holds the string denoting the followers field in the database. + FieldFollowers = "followers" + // FieldFollowing holds the string denoting the following field in the database. + FieldFollowing = "following" + // FieldOutbox holds the string denoting the outbox field in the database. + FieldOutbox = "outbox" + // EdgeAvatarImage holds the string denoting the avatarimage edge name in mutations. + EdgeAvatarImage = "avatarImage" + // EdgeHeaderImage holds the string denoting the headerimage edge name in mutations. + EdgeHeaderImage = "headerImage" + // EdgeAuthoredNotes holds the string denoting the authorednotes edge name in mutations. + EdgeAuthoredNotes = "authoredNotes" + // EdgeMentionedNotes holds the string denoting the mentionednotes edge name in mutations. + EdgeMentionedNotes = "mentionedNotes" + // Table holds the table name of the user in the database. + Table = "users" + // AvatarImageTable is the table that holds the avatarImage relation/edge. + AvatarImageTable = "users" + // AvatarImageInverseTable is the table name for the Image entity. + // It exists in this package in order to avoid circular dependency with the "image" package. 
+ AvatarImageInverseTable = "images" + // AvatarImageColumn is the table column denoting the avatarImage relation/edge. + AvatarImageColumn = "user_avatar_image" + // HeaderImageTable is the table that holds the headerImage relation/edge. + HeaderImageTable = "users" + // HeaderImageInverseTable is the table name for the Image entity. + // It exists in this package in order to avoid circular dependency with the "image" package. + HeaderImageInverseTable = "images" + // HeaderImageColumn is the table column denoting the headerImage relation/edge. + HeaderImageColumn = "user_header_image" + // AuthoredNotesTable is the table that holds the authoredNotes relation/edge. + AuthoredNotesTable = "notes" + // AuthoredNotesInverseTable is the table name for the Note entity. + // It exists in this package in order to avoid circular dependency with the "note" package. + AuthoredNotesInverseTable = "notes" + // AuthoredNotesColumn is the table column denoting the authoredNotes relation/edge. + AuthoredNotesColumn = "note_author" + // MentionedNotesTable is the table that holds the mentionedNotes relation/edge. The primary key declared below. + MentionedNotesTable = "note_mentions" + // MentionedNotesInverseTable is the table name for the Note entity. + // It exists in this package in order to avoid circular dependency with the "note" package. + MentionedNotesInverseTable = "notes" +) + +// Columns holds all SQL columns for user fields. +var Columns = []string{ + FieldID, + FieldIsRemote, + FieldURI, + FieldExtensions, + FieldCreatedAt, + FieldUpdatedAt, + FieldUsername, + FieldPasswordHash, + FieldDisplayName, + FieldBiography, + FieldPublicKey, + FieldPrivateKey, + FieldIndexable, + FieldPrivacyLevel, + FieldFields, + FieldInbox, + FieldFeatured, + FieldFollowers, + FieldFollowing, + FieldOutbox, +} + +// ForeignKeys holds the SQL foreign-keys that are owned by the "users" +// table and are not defined as standalone fields in the schema. 
+var ForeignKeys = []string{ + "user_avatar_image", + "user_header_image", +} + +var ( + // MentionedNotesPrimaryKey and MentionedNotesColumn2 are the table columns denoting the + // primary key for the mentionedNotes relation (M2M). + MentionedNotesPrimaryKey = []string{"note_id", "user_id"} +) + +// ValidColumn reports if the column name is valid (part of the table columns). +func ValidColumn(column string) bool { + for i := range Columns { + if column == Columns[i] { + return true + } + } + for i := range ForeignKeys { + if column == ForeignKeys[i] { + return true + } + } + return false +} + +var ( + // URIValidator is a validator for the "uri" field. It is called by the builders before save. + URIValidator func(string) error + // DefaultExtensions holds the default value on creation for the "extensions" field. + DefaultExtensions lysand.Extensions + // DefaultCreatedAt holds the default value on creation for the "created_at" field. + DefaultCreatedAt func() time.Time + // DefaultUpdatedAt holds the default value on creation for the "updated_at" field. + DefaultUpdatedAt func() time.Time + // UpdateDefaultUpdatedAt holds the default value on update for the "updated_at" field. + UpdateDefaultUpdatedAt func() time.Time + // UsernameValidator is a validator for the "username" field. It is called by the builders before save. + UsernameValidator func(string) error + // DisplayNameValidator is a validator for the "displayName" field. It is called by the builders before save. + DisplayNameValidator func(string) error + // DefaultIndexable holds the default value on creation for the "indexable" field. + DefaultIndexable bool + // DefaultFields holds the default value on creation for the "fields" field. + DefaultFields []lysand.Field + // InboxValidator is a validator for the "inbox" field. It is called by the builders before save. + InboxValidator func(string) error + // FeaturedValidator is a validator for the "featured" field. It is called by the builders before save. 
+ FeaturedValidator func(string) error + // FollowersValidator is a validator for the "followers" field. It is called by the builders before save. + FollowersValidator func(string) error + // FollowingValidator is a validator for the "following" field. It is called by the builders before save. + FollowingValidator func(string) error + // OutboxValidator is a validator for the "outbox" field. It is called by the builders before save. + OutboxValidator func(string) error + // DefaultID holds the default value on creation for the "id" field. + DefaultID func() uuid.UUID +) + +// PrivacyLevel defines the type for the "privacyLevel" enum field. +type PrivacyLevel string + +// PrivacyLevelPublic is the default value of the PrivacyLevel enum. +const DefaultPrivacyLevel = PrivacyLevelPublic + +// PrivacyLevel values. +const ( + PrivacyLevelPublic PrivacyLevel = "public" + PrivacyLevelRestricted PrivacyLevel = "restricted" + PrivacyLevelPrivate PrivacyLevel = "private" +) + +func (pl PrivacyLevel) String() string { + return string(pl) +} + +// PrivacyLevelValidator is a validator for the "privacyLevel" field enum values. It is called by the builders before save. +func PrivacyLevelValidator(pl PrivacyLevel) error { + switch pl { + case PrivacyLevelPublic, PrivacyLevelRestricted, PrivacyLevelPrivate: + return nil + default: + return fmt.Errorf("user: invalid enum value for privacyLevel field: %q", pl) + } +} + +// OrderOption defines the ordering options for the User queries. +type OrderOption func(*sql.Selector) + +// ByID orders the results by the id field. +func ByID(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldID, opts...).ToFunc() +} + +// ByIsRemote orders the results by the isRemote field. +func ByIsRemote(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIsRemote, opts...).ToFunc() +} + +// ByURI orders the results by the uri field. 
+func ByURI(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldURI, opts...).ToFunc() +} + +// ByCreatedAt orders the results by the created_at field. +func ByCreatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldCreatedAt, opts...).ToFunc() +} + +// ByUpdatedAt orders the results by the updated_at field. +func ByUpdatedAt(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUpdatedAt, opts...).ToFunc() +} + +// ByUsername orders the results by the username field. +func ByUsername(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldUsername, opts...).ToFunc() +} + +// ByDisplayName orders the results by the displayName field. +func ByDisplayName(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldDisplayName, opts...).ToFunc() +} + +// ByBiography orders the results by the biography field. +func ByBiography(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldBiography, opts...).ToFunc() +} + +// ByIndexable orders the results by the indexable field. +func ByIndexable(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldIndexable, opts...).ToFunc() +} + +// ByPrivacyLevel orders the results by the privacyLevel field. +func ByPrivacyLevel(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldPrivacyLevel, opts...).ToFunc() +} + +// ByInbox orders the results by the inbox field. +func ByInbox(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldInbox, opts...).ToFunc() +} + +// ByFeatured orders the results by the featured field. +func ByFeatured(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFeatured, opts...).ToFunc() +} + +// ByFollowers orders the results by the followers field. 
+func ByFollowers(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFollowers, opts...).ToFunc() +} + +// ByFollowing orders the results by the following field. +func ByFollowing(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldFollowing, opts...).ToFunc() +} + +// ByOutbox orders the results by the outbox field. +func ByOutbox(opts ...sql.OrderTermOption) OrderOption { + return sql.OrderByField(FieldOutbox, opts...).ToFunc() +} + +// ByAvatarImageField orders the results by avatarImage field. +func ByAvatarImageField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAvatarImageStep(), sql.OrderByField(field, opts...)) + } +} + +// ByHeaderImageField orders the results by headerImage field. +func ByHeaderImageField(field string, opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newHeaderImageStep(), sql.OrderByField(field, opts...)) + } +} + +// ByAuthoredNotesCount orders the results by authoredNotes count. +func ByAuthoredNotesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newAuthoredNotesStep(), opts...) + } +} + +// ByAuthoredNotes orders the results by authoredNotes terms. +func ByAuthoredNotes(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newAuthoredNotesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} + +// ByMentionedNotesCount orders the results by mentionedNotes count. +func ByMentionedNotesCount(opts ...sql.OrderTermOption) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborsCount(s, newMentionedNotesStep(), opts...) + } +} + +// ByMentionedNotes orders the results by mentionedNotes terms. 
+func ByMentionedNotes(term sql.OrderTerm, terms ...sql.OrderTerm) OrderOption { + return func(s *sql.Selector) { + sqlgraph.OrderByNeighborTerms(s, newMentionedNotesStep(), append([]sql.OrderTerm{term}, terms...)...) + } +} +func newAvatarImageStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AvatarImageInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AvatarImageTable, AvatarImageColumn), + ) +} +func newHeaderImageStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(HeaderImageInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, HeaderImageTable, HeaderImageColumn), + ) +} +func newAuthoredNotesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(AuthoredNotesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, AuthoredNotesTable, AuthoredNotesColumn), + ) +} +func newMentionedNotesStep() *sqlgraph.Step { + return sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.To(MentionedNotesInverseTable, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, MentionedNotesTable, MentionedNotesPrimaryKey...), + ) +} diff --git a/ent/user/where.go b/ent/user/where.go new file mode 100644 index 0000000..085dc0a --- /dev/null +++ b/ent/user/where.go @@ -0,0 +1,1140 @@ +// Code generated by ent, DO NOT EDIT. + +package user + +import ( + "crypto/ed25519" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/predicate" +) + +// ID filters vertices based on their ID field. +func ID(id uuid.UUID) predicate.User { + return predicate.User(sql.FieldEQ(FieldID, id)) +} + +// IDEQ applies the EQ predicate on the ID field. +func IDEQ(id uuid.UUID) predicate.User { + return predicate.User(sql.FieldEQ(FieldID, id)) +} + +// IDNEQ applies the NEQ predicate on the ID field. 
+func IDNEQ(id uuid.UUID) predicate.User { + return predicate.User(sql.FieldNEQ(FieldID, id)) +} + +// IDIn applies the In predicate on the ID field. +func IDIn(ids ...uuid.UUID) predicate.User { + return predicate.User(sql.FieldIn(FieldID, ids...)) +} + +// IDNotIn applies the NotIn predicate on the ID field. +func IDNotIn(ids ...uuid.UUID) predicate.User { + return predicate.User(sql.FieldNotIn(FieldID, ids...)) +} + +// IDGT applies the GT predicate on the ID field. +func IDGT(id uuid.UUID) predicate.User { + return predicate.User(sql.FieldGT(FieldID, id)) +} + +// IDGTE applies the GTE predicate on the ID field. +func IDGTE(id uuid.UUID) predicate.User { + return predicate.User(sql.FieldGTE(FieldID, id)) +} + +// IDLT applies the LT predicate on the ID field. +func IDLT(id uuid.UUID) predicate.User { + return predicate.User(sql.FieldLT(FieldID, id)) +} + +// IDLTE applies the LTE predicate on the ID field. +func IDLTE(id uuid.UUID) predicate.User { + return predicate.User(sql.FieldLTE(FieldID, id)) +} + +// IsRemote applies equality check predicate on the "isRemote" field. It's identical to IsRemoteEQ. +func IsRemote(v bool) predicate.User { + return predicate.User(sql.FieldEQ(FieldIsRemote, v)) +} + +// URI applies equality check predicate on the "uri" field. It's identical to URIEQ. +func URI(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldURI, v)) +} + +// CreatedAt applies equality check predicate on the "created_at" field. It's identical to CreatedAtEQ. +func CreatedAt(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldCreatedAt, v)) +} + +// UpdatedAt applies equality check predicate on the "updated_at" field. It's identical to UpdatedAtEQ. +func UpdatedAt(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// Username applies equality check predicate on the "username" field. It's identical to UsernameEQ. 
+func Username(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldUsername, v)) +} + +// PasswordHash applies equality check predicate on the "passwordHash" field. It's identical to PasswordHashEQ. +func PasswordHash(v []byte) predicate.User { + return predicate.User(sql.FieldEQ(FieldPasswordHash, v)) +} + +// DisplayName applies equality check predicate on the "displayName" field. It's identical to DisplayNameEQ. +func DisplayName(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldDisplayName, v)) +} + +// Biography applies equality check predicate on the "biography" field. It's identical to BiographyEQ. +func Biography(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldBiography, v)) +} + +// PublicKey applies equality check predicate on the "publicKey" field. It's identical to PublicKeyEQ. +func PublicKey(v ed25519.PublicKey) predicate.User { + vc := []byte(v) + return predicate.User(sql.FieldEQ(FieldPublicKey, vc)) +} + +// PrivateKey applies equality check predicate on the "privateKey" field. It's identical to PrivateKeyEQ. +func PrivateKey(v ed25519.PrivateKey) predicate.User { + vc := []byte(v) + return predicate.User(sql.FieldEQ(FieldPrivateKey, vc)) +} + +// Indexable applies equality check predicate on the "indexable" field. It's identical to IndexableEQ. +func Indexable(v bool) predicate.User { + return predicate.User(sql.FieldEQ(FieldIndexable, v)) +} + +// Inbox applies equality check predicate on the "inbox" field. It's identical to InboxEQ. +func Inbox(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldInbox, v)) +} + +// Featured applies equality check predicate on the "featured" field. It's identical to FeaturedEQ. +func Featured(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldFeatured, v)) +} + +// Followers applies equality check predicate on the "followers" field. It's identical to FollowersEQ. 
+func Followers(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldFollowers, v)) +} + +// Following applies equality check predicate on the "following" field. It's identical to FollowingEQ. +func Following(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldFollowing, v)) +} + +// Outbox applies equality check predicate on the "outbox" field. It's identical to OutboxEQ. +func Outbox(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldOutbox, v)) +} + +// IsRemoteEQ applies the EQ predicate on the "isRemote" field. +func IsRemoteEQ(v bool) predicate.User { + return predicate.User(sql.FieldEQ(FieldIsRemote, v)) +} + +// IsRemoteNEQ applies the NEQ predicate on the "isRemote" field. +func IsRemoteNEQ(v bool) predicate.User { + return predicate.User(sql.FieldNEQ(FieldIsRemote, v)) +} + +// URIEQ applies the EQ predicate on the "uri" field. +func URIEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldURI, v)) +} + +// URINEQ applies the NEQ predicate on the "uri" field. +func URINEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldURI, v)) +} + +// URIIn applies the In predicate on the "uri" field. +func URIIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldURI, vs...)) +} + +// URINotIn applies the NotIn predicate on the "uri" field. +func URINotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldURI, vs...)) +} + +// URIGT applies the GT predicate on the "uri" field. +func URIGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldURI, v)) +} + +// URIGTE applies the GTE predicate on the "uri" field. +func URIGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldURI, v)) +} + +// URILT applies the LT predicate on the "uri" field. +func URILT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldURI, v)) +} + +// URILTE applies the LTE predicate on the "uri" field. 
+func URILTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldURI, v)) +} + +// URIContains applies the Contains predicate on the "uri" field. +func URIContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldURI, v)) +} + +// URIHasPrefix applies the HasPrefix predicate on the "uri" field. +func URIHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldURI, v)) +} + +// URIHasSuffix applies the HasSuffix predicate on the "uri" field. +func URIHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldURI, v)) +} + +// URIEqualFold applies the EqualFold predicate on the "uri" field. +func URIEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldURI, v)) +} + +// URIContainsFold applies the ContainsFold predicate on the "uri" field. +func URIContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldURI, v)) +} + +// CreatedAtEQ applies the EQ predicate on the "created_at" field. +func CreatedAtEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldCreatedAt, v)) +} + +// CreatedAtNEQ applies the NEQ predicate on the "created_at" field. +func CreatedAtNEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldNEQ(FieldCreatedAt, v)) +} + +// CreatedAtIn applies the In predicate on the "created_at" field. +func CreatedAtIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldIn(FieldCreatedAt, vs...)) +} + +// CreatedAtNotIn applies the NotIn predicate on the "created_at" field. +func CreatedAtNotIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldNotIn(FieldCreatedAt, vs...)) +} + +// CreatedAtGT applies the GT predicate on the "created_at" field. +func CreatedAtGT(v time.Time) predicate.User { + return predicate.User(sql.FieldGT(FieldCreatedAt, v)) +} + +// CreatedAtGTE applies the GTE predicate on the "created_at" field. 
+func CreatedAtGTE(v time.Time) predicate.User { + return predicate.User(sql.FieldGTE(FieldCreatedAt, v)) +} + +// CreatedAtLT applies the LT predicate on the "created_at" field. +func CreatedAtLT(v time.Time) predicate.User { + return predicate.User(sql.FieldLT(FieldCreatedAt, v)) +} + +// CreatedAtLTE applies the LTE predicate on the "created_at" field. +func CreatedAtLTE(v time.Time) predicate.User { + return predicate.User(sql.FieldLTE(FieldCreatedAt, v)) +} + +// UpdatedAtEQ applies the EQ predicate on the "updated_at" field. +func UpdatedAtEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtNEQ applies the NEQ predicate on the "updated_at" field. +func UpdatedAtNEQ(v time.Time) predicate.User { + return predicate.User(sql.FieldNEQ(FieldUpdatedAt, v)) +} + +// UpdatedAtIn applies the In predicate on the "updated_at" field. +func UpdatedAtIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtNotIn applies the NotIn predicate on the "updated_at" field. +func UpdatedAtNotIn(vs ...time.Time) predicate.User { + return predicate.User(sql.FieldNotIn(FieldUpdatedAt, vs...)) +} + +// UpdatedAtGT applies the GT predicate on the "updated_at" field. +func UpdatedAtGT(v time.Time) predicate.User { + return predicate.User(sql.FieldGT(FieldUpdatedAt, v)) +} + +// UpdatedAtGTE applies the GTE predicate on the "updated_at" field. +func UpdatedAtGTE(v time.Time) predicate.User { + return predicate.User(sql.FieldGTE(FieldUpdatedAt, v)) +} + +// UpdatedAtLT applies the LT predicate on the "updated_at" field. +func UpdatedAtLT(v time.Time) predicate.User { + return predicate.User(sql.FieldLT(FieldUpdatedAt, v)) +} + +// UpdatedAtLTE applies the LTE predicate on the "updated_at" field. +func UpdatedAtLTE(v time.Time) predicate.User { + return predicate.User(sql.FieldLTE(FieldUpdatedAt, v)) +} + +// UsernameEQ applies the EQ predicate on the "username" field. 
+func UsernameEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldUsername, v)) +} + +// UsernameNEQ applies the NEQ predicate on the "username" field. +func UsernameNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldUsername, v)) +} + +// UsernameIn applies the In predicate on the "username" field. +func UsernameIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldUsername, vs...)) +} + +// UsernameNotIn applies the NotIn predicate on the "username" field. +func UsernameNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldUsername, vs...)) +} + +// UsernameGT applies the GT predicate on the "username" field. +func UsernameGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldUsername, v)) +} + +// UsernameGTE applies the GTE predicate on the "username" field. +func UsernameGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldUsername, v)) +} + +// UsernameLT applies the LT predicate on the "username" field. +func UsernameLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldUsername, v)) +} + +// UsernameLTE applies the LTE predicate on the "username" field. +func UsernameLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldUsername, v)) +} + +// UsernameContains applies the Contains predicate on the "username" field. +func UsernameContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldUsername, v)) +} + +// UsernameHasPrefix applies the HasPrefix predicate on the "username" field. +func UsernameHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldUsername, v)) +} + +// UsernameHasSuffix applies the HasSuffix predicate on the "username" field. +func UsernameHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldUsername, v)) +} + +// UsernameEqualFold applies the EqualFold predicate on the "username" field. 
+func UsernameEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldUsername, v)) +} + +// UsernameContainsFold applies the ContainsFold predicate on the "username" field. +func UsernameContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldUsername, v)) +} + +// PasswordHashEQ applies the EQ predicate on the "passwordHash" field. +func PasswordHashEQ(v []byte) predicate.User { + return predicate.User(sql.FieldEQ(FieldPasswordHash, v)) +} + +// PasswordHashNEQ applies the NEQ predicate on the "passwordHash" field. +func PasswordHashNEQ(v []byte) predicate.User { + return predicate.User(sql.FieldNEQ(FieldPasswordHash, v)) +} + +// PasswordHashIn applies the In predicate on the "passwordHash" field. +func PasswordHashIn(vs ...[]byte) predicate.User { + return predicate.User(sql.FieldIn(FieldPasswordHash, vs...)) +} + +// PasswordHashNotIn applies the NotIn predicate on the "passwordHash" field. +func PasswordHashNotIn(vs ...[]byte) predicate.User { + return predicate.User(sql.FieldNotIn(FieldPasswordHash, vs...)) +} + +// PasswordHashGT applies the GT predicate on the "passwordHash" field. +func PasswordHashGT(v []byte) predicate.User { + return predicate.User(sql.FieldGT(FieldPasswordHash, v)) +} + +// PasswordHashGTE applies the GTE predicate on the "passwordHash" field. +func PasswordHashGTE(v []byte) predicate.User { + return predicate.User(sql.FieldGTE(FieldPasswordHash, v)) +} + +// PasswordHashLT applies the LT predicate on the "passwordHash" field. +func PasswordHashLT(v []byte) predicate.User { + return predicate.User(sql.FieldLT(FieldPasswordHash, v)) +} + +// PasswordHashLTE applies the LTE predicate on the "passwordHash" field. +func PasswordHashLTE(v []byte) predicate.User { + return predicate.User(sql.FieldLTE(FieldPasswordHash, v)) +} + +// PasswordHashIsNil applies the IsNil predicate on the "passwordHash" field. 
+func PasswordHashIsNil() predicate.User { + return predicate.User(sql.FieldIsNull(FieldPasswordHash)) +} + +// PasswordHashNotNil applies the NotNil predicate on the "passwordHash" field. +func PasswordHashNotNil() predicate.User { + return predicate.User(sql.FieldNotNull(FieldPasswordHash)) +} + +// DisplayNameEQ applies the EQ predicate on the "displayName" field. +func DisplayNameEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldDisplayName, v)) +} + +// DisplayNameNEQ applies the NEQ predicate on the "displayName" field. +func DisplayNameNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldDisplayName, v)) +} + +// DisplayNameIn applies the In predicate on the "displayName" field. +func DisplayNameIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldDisplayName, vs...)) +} + +// DisplayNameNotIn applies the NotIn predicate on the "displayName" field. +func DisplayNameNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldDisplayName, vs...)) +} + +// DisplayNameGT applies the GT predicate on the "displayName" field. +func DisplayNameGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldDisplayName, v)) +} + +// DisplayNameGTE applies the GTE predicate on the "displayName" field. +func DisplayNameGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldDisplayName, v)) +} + +// DisplayNameLT applies the LT predicate on the "displayName" field. +func DisplayNameLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldDisplayName, v)) +} + +// DisplayNameLTE applies the LTE predicate on the "displayName" field. +func DisplayNameLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldDisplayName, v)) +} + +// DisplayNameContains applies the Contains predicate on the "displayName" field. 
+func DisplayNameContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldDisplayName, v)) +} + +// DisplayNameHasPrefix applies the HasPrefix predicate on the "displayName" field. +func DisplayNameHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldDisplayName, v)) +} + +// DisplayNameHasSuffix applies the HasSuffix predicate on the "displayName" field. +func DisplayNameHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldDisplayName, v)) +} + +// DisplayNameIsNil applies the IsNil predicate on the "displayName" field. +func DisplayNameIsNil() predicate.User { + return predicate.User(sql.FieldIsNull(FieldDisplayName)) +} + +// DisplayNameNotNil applies the NotNil predicate on the "displayName" field. +func DisplayNameNotNil() predicate.User { + return predicate.User(sql.FieldNotNull(FieldDisplayName)) +} + +// DisplayNameEqualFold applies the EqualFold predicate on the "displayName" field. +func DisplayNameEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldDisplayName, v)) +} + +// DisplayNameContainsFold applies the ContainsFold predicate on the "displayName" field. +func DisplayNameContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldDisplayName, v)) +} + +// BiographyEQ applies the EQ predicate on the "biography" field. +func BiographyEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldBiography, v)) +} + +// BiographyNEQ applies the NEQ predicate on the "biography" field. +func BiographyNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldBiography, v)) +} + +// BiographyIn applies the In predicate on the "biography" field. +func BiographyIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldBiography, vs...)) +} + +// BiographyNotIn applies the NotIn predicate on the "biography" field. 
+func BiographyNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldBiography, vs...)) +} + +// BiographyGT applies the GT predicate on the "biography" field. +func BiographyGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldBiography, v)) +} + +// BiographyGTE applies the GTE predicate on the "biography" field. +func BiographyGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldBiography, v)) +} + +// BiographyLT applies the LT predicate on the "biography" field. +func BiographyLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldBiography, v)) +} + +// BiographyLTE applies the LTE predicate on the "biography" field. +func BiographyLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldBiography, v)) +} + +// BiographyContains applies the Contains predicate on the "biography" field. +func BiographyContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldBiography, v)) +} + +// BiographyHasPrefix applies the HasPrefix predicate on the "biography" field. +func BiographyHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldBiography, v)) +} + +// BiographyHasSuffix applies the HasSuffix predicate on the "biography" field. +func BiographyHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldBiography, v)) +} + +// BiographyIsNil applies the IsNil predicate on the "biography" field. +func BiographyIsNil() predicate.User { + return predicate.User(sql.FieldIsNull(FieldBiography)) +} + +// BiographyNotNil applies the NotNil predicate on the "biography" field. +func BiographyNotNil() predicate.User { + return predicate.User(sql.FieldNotNull(FieldBiography)) +} + +// BiographyEqualFold applies the EqualFold predicate on the "biography" field. 
+func BiographyEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldBiography, v)) +} + +// BiographyContainsFold applies the ContainsFold predicate on the "biography" field. +func BiographyContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldBiography, v)) +} + +// PublicKeyEQ applies the EQ predicate on the "publicKey" field. +func PublicKeyEQ(v ed25519.PublicKey) predicate.User { + vc := []byte(v) + return predicate.User(sql.FieldEQ(FieldPublicKey, vc)) +} + +// PublicKeyNEQ applies the NEQ predicate on the "publicKey" field. +func PublicKeyNEQ(v ed25519.PublicKey) predicate.User { + vc := []byte(v) + return predicate.User(sql.FieldNEQ(FieldPublicKey, vc)) +} + +// PublicKeyIn applies the In predicate on the "publicKey" field. +func PublicKeyIn(vs ...ed25519.PublicKey) predicate.User { + v := make([]any, len(vs)) + for i := range v { + v[i] = []byte(vs[i]) + } + return predicate.User(sql.FieldIn(FieldPublicKey, v...)) +} + +// PublicKeyNotIn applies the NotIn predicate on the "publicKey" field. +func PublicKeyNotIn(vs ...ed25519.PublicKey) predicate.User { + v := make([]any, len(vs)) + for i := range v { + v[i] = []byte(vs[i]) + } + return predicate.User(sql.FieldNotIn(FieldPublicKey, v...)) +} + +// PublicKeyGT applies the GT predicate on the "publicKey" field. +func PublicKeyGT(v ed25519.PublicKey) predicate.User { + vc := []byte(v) + return predicate.User(sql.FieldGT(FieldPublicKey, vc)) +} + +// PublicKeyGTE applies the GTE predicate on the "publicKey" field. +func PublicKeyGTE(v ed25519.PublicKey) predicate.User { + vc := []byte(v) + return predicate.User(sql.FieldGTE(FieldPublicKey, vc)) +} + +// PublicKeyLT applies the LT predicate on the "publicKey" field. +func PublicKeyLT(v ed25519.PublicKey) predicate.User { + vc := []byte(v) + return predicate.User(sql.FieldLT(FieldPublicKey, vc)) +} + +// PublicKeyLTE applies the LTE predicate on the "publicKey" field. 
+func PublicKeyLTE(v ed25519.PublicKey) predicate.User { + vc := []byte(v) + return predicate.User(sql.FieldLTE(FieldPublicKey, vc)) +} + +// PrivateKeyEQ applies the EQ predicate on the "privateKey" field. +func PrivateKeyEQ(v ed25519.PrivateKey) predicate.User { + vc := []byte(v) + return predicate.User(sql.FieldEQ(FieldPrivateKey, vc)) +} + +// PrivateKeyNEQ applies the NEQ predicate on the "privateKey" field. +func PrivateKeyNEQ(v ed25519.PrivateKey) predicate.User { + vc := []byte(v) + return predicate.User(sql.FieldNEQ(FieldPrivateKey, vc)) +} + +// PrivateKeyIn applies the In predicate on the "privateKey" field. +func PrivateKeyIn(vs ...ed25519.PrivateKey) predicate.User { + v := make([]any, len(vs)) + for i := range v { + v[i] = []byte(vs[i]) + } + return predicate.User(sql.FieldIn(FieldPrivateKey, v...)) +} + +// PrivateKeyNotIn applies the NotIn predicate on the "privateKey" field. +func PrivateKeyNotIn(vs ...ed25519.PrivateKey) predicate.User { + v := make([]any, len(vs)) + for i := range v { + v[i] = []byte(vs[i]) + } + return predicate.User(sql.FieldNotIn(FieldPrivateKey, v...)) +} + +// PrivateKeyGT applies the GT predicate on the "privateKey" field. +func PrivateKeyGT(v ed25519.PrivateKey) predicate.User { + vc := []byte(v) + return predicate.User(sql.FieldGT(FieldPrivateKey, vc)) +} + +// PrivateKeyGTE applies the GTE predicate on the "privateKey" field. +func PrivateKeyGTE(v ed25519.PrivateKey) predicate.User { + vc := []byte(v) + return predicate.User(sql.FieldGTE(FieldPrivateKey, vc)) +} + +// PrivateKeyLT applies the LT predicate on the "privateKey" field. +func PrivateKeyLT(v ed25519.PrivateKey) predicate.User { + vc := []byte(v) + return predicate.User(sql.FieldLT(FieldPrivateKey, vc)) +} + +// PrivateKeyLTE applies the LTE predicate on the "privateKey" field. 
+func PrivateKeyLTE(v ed25519.PrivateKey) predicate.User { + vc := []byte(v) + return predicate.User(sql.FieldLTE(FieldPrivateKey, vc)) +} + +// PrivateKeyIsNil applies the IsNil predicate on the "privateKey" field. +func PrivateKeyIsNil() predicate.User { + return predicate.User(sql.FieldIsNull(FieldPrivateKey)) +} + +// PrivateKeyNotNil applies the NotNil predicate on the "privateKey" field. +func PrivateKeyNotNil() predicate.User { + return predicate.User(sql.FieldNotNull(FieldPrivateKey)) +} + +// IndexableEQ applies the EQ predicate on the "indexable" field. +func IndexableEQ(v bool) predicate.User { + return predicate.User(sql.FieldEQ(FieldIndexable, v)) +} + +// IndexableNEQ applies the NEQ predicate on the "indexable" field. +func IndexableNEQ(v bool) predicate.User { + return predicate.User(sql.FieldNEQ(FieldIndexable, v)) +} + +// PrivacyLevelEQ applies the EQ predicate on the "privacyLevel" field. +func PrivacyLevelEQ(v PrivacyLevel) predicate.User { + return predicate.User(sql.FieldEQ(FieldPrivacyLevel, v)) +} + +// PrivacyLevelNEQ applies the NEQ predicate on the "privacyLevel" field. +func PrivacyLevelNEQ(v PrivacyLevel) predicate.User { + return predicate.User(sql.FieldNEQ(FieldPrivacyLevel, v)) +} + +// PrivacyLevelIn applies the In predicate on the "privacyLevel" field. +func PrivacyLevelIn(vs ...PrivacyLevel) predicate.User { + return predicate.User(sql.FieldIn(FieldPrivacyLevel, vs...)) +} + +// PrivacyLevelNotIn applies the NotIn predicate on the "privacyLevel" field. +func PrivacyLevelNotIn(vs ...PrivacyLevel) predicate.User { + return predicate.User(sql.FieldNotIn(FieldPrivacyLevel, vs...)) +} + +// InboxEQ applies the EQ predicate on the "inbox" field. +func InboxEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldInbox, v)) +} + +// InboxNEQ applies the NEQ predicate on the "inbox" field. 
+func InboxNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldInbox, v)) +} + +// InboxIn applies the In predicate on the "inbox" field. +func InboxIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldInbox, vs...)) +} + +// InboxNotIn applies the NotIn predicate on the "inbox" field. +func InboxNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldInbox, vs...)) +} + +// InboxGT applies the GT predicate on the "inbox" field. +func InboxGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldInbox, v)) +} + +// InboxGTE applies the GTE predicate on the "inbox" field. +func InboxGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldInbox, v)) +} + +// InboxLT applies the LT predicate on the "inbox" field. +func InboxLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldInbox, v)) +} + +// InboxLTE applies the LTE predicate on the "inbox" field. +func InboxLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldInbox, v)) +} + +// InboxContains applies the Contains predicate on the "inbox" field. +func InboxContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldInbox, v)) +} + +// InboxHasPrefix applies the HasPrefix predicate on the "inbox" field. +func InboxHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldInbox, v)) +} + +// InboxHasSuffix applies the HasSuffix predicate on the "inbox" field. +func InboxHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldInbox, v)) +} + +// InboxEqualFold applies the EqualFold predicate on the "inbox" field. +func InboxEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldInbox, v)) +} + +// InboxContainsFold applies the ContainsFold predicate on the "inbox" field. 
+func InboxContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldInbox, v)) +} + +// FeaturedEQ applies the EQ predicate on the "featured" field. +func FeaturedEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldFeatured, v)) +} + +// FeaturedNEQ applies the NEQ predicate on the "featured" field. +func FeaturedNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldFeatured, v)) +} + +// FeaturedIn applies the In predicate on the "featured" field. +func FeaturedIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldFeatured, vs...)) +} + +// FeaturedNotIn applies the NotIn predicate on the "featured" field. +func FeaturedNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldFeatured, vs...)) +} + +// FeaturedGT applies the GT predicate on the "featured" field. +func FeaturedGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldFeatured, v)) +} + +// FeaturedGTE applies the GTE predicate on the "featured" field. +func FeaturedGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldFeatured, v)) +} + +// FeaturedLT applies the LT predicate on the "featured" field. +func FeaturedLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldFeatured, v)) +} + +// FeaturedLTE applies the LTE predicate on the "featured" field. +func FeaturedLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldFeatured, v)) +} + +// FeaturedContains applies the Contains predicate on the "featured" field. +func FeaturedContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldFeatured, v)) +} + +// FeaturedHasPrefix applies the HasPrefix predicate on the "featured" field. +func FeaturedHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldFeatured, v)) +} + +// FeaturedHasSuffix applies the HasSuffix predicate on the "featured" field. 
+func FeaturedHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldFeatured, v)) +} + +// FeaturedEqualFold applies the EqualFold predicate on the "featured" field. +func FeaturedEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldFeatured, v)) +} + +// FeaturedContainsFold applies the ContainsFold predicate on the "featured" field. +func FeaturedContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldFeatured, v)) +} + +// FollowersEQ applies the EQ predicate on the "followers" field. +func FollowersEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldFollowers, v)) +} + +// FollowersNEQ applies the NEQ predicate on the "followers" field. +func FollowersNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldFollowers, v)) +} + +// FollowersIn applies the In predicate on the "followers" field. +func FollowersIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldFollowers, vs...)) +} + +// FollowersNotIn applies the NotIn predicate on the "followers" field. +func FollowersNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldFollowers, vs...)) +} + +// FollowersGT applies the GT predicate on the "followers" field. +func FollowersGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldFollowers, v)) +} + +// FollowersGTE applies the GTE predicate on the "followers" field. +func FollowersGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldFollowers, v)) +} + +// FollowersLT applies the LT predicate on the "followers" field. +func FollowersLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldFollowers, v)) +} + +// FollowersLTE applies the LTE predicate on the "followers" field. 
+func FollowersLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldFollowers, v)) +} + +// FollowersContains applies the Contains predicate on the "followers" field. +func FollowersContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldFollowers, v)) +} + +// FollowersHasPrefix applies the HasPrefix predicate on the "followers" field. +func FollowersHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldFollowers, v)) +} + +// FollowersHasSuffix applies the HasSuffix predicate on the "followers" field. +func FollowersHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldFollowers, v)) +} + +// FollowersEqualFold applies the EqualFold predicate on the "followers" field. +func FollowersEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldFollowers, v)) +} + +// FollowersContainsFold applies the ContainsFold predicate on the "followers" field. +func FollowersContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldFollowers, v)) +} + +// FollowingEQ applies the EQ predicate on the "following" field. +func FollowingEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldFollowing, v)) +} + +// FollowingNEQ applies the NEQ predicate on the "following" field. +func FollowingNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldFollowing, v)) +} + +// FollowingIn applies the In predicate on the "following" field. +func FollowingIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldFollowing, vs...)) +} + +// FollowingNotIn applies the NotIn predicate on the "following" field. +func FollowingNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldFollowing, vs...)) +} + +// FollowingGT applies the GT predicate on the "following" field. 
+func FollowingGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldFollowing, v)) +} + +// FollowingGTE applies the GTE predicate on the "following" field. +func FollowingGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldFollowing, v)) +} + +// FollowingLT applies the LT predicate on the "following" field. +func FollowingLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldFollowing, v)) +} + +// FollowingLTE applies the LTE predicate on the "following" field. +func FollowingLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldFollowing, v)) +} + +// FollowingContains applies the Contains predicate on the "following" field. +func FollowingContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldFollowing, v)) +} + +// FollowingHasPrefix applies the HasPrefix predicate on the "following" field. +func FollowingHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldFollowing, v)) +} + +// FollowingHasSuffix applies the HasSuffix predicate on the "following" field. +func FollowingHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldFollowing, v)) +} + +// FollowingEqualFold applies the EqualFold predicate on the "following" field. +func FollowingEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldFollowing, v)) +} + +// FollowingContainsFold applies the ContainsFold predicate on the "following" field. +func FollowingContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldFollowing, v)) +} + +// OutboxEQ applies the EQ predicate on the "outbox" field. +func OutboxEQ(v string) predicate.User { + return predicate.User(sql.FieldEQ(FieldOutbox, v)) +} + +// OutboxNEQ applies the NEQ predicate on the "outbox" field. 
+func OutboxNEQ(v string) predicate.User { + return predicate.User(sql.FieldNEQ(FieldOutbox, v)) +} + +// OutboxIn applies the In predicate on the "outbox" field. +func OutboxIn(vs ...string) predicate.User { + return predicate.User(sql.FieldIn(FieldOutbox, vs...)) +} + +// OutboxNotIn applies the NotIn predicate on the "outbox" field. +func OutboxNotIn(vs ...string) predicate.User { + return predicate.User(sql.FieldNotIn(FieldOutbox, vs...)) +} + +// OutboxGT applies the GT predicate on the "outbox" field. +func OutboxGT(v string) predicate.User { + return predicate.User(sql.FieldGT(FieldOutbox, v)) +} + +// OutboxGTE applies the GTE predicate on the "outbox" field. +func OutboxGTE(v string) predicate.User { + return predicate.User(sql.FieldGTE(FieldOutbox, v)) +} + +// OutboxLT applies the LT predicate on the "outbox" field. +func OutboxLT(v string) predicate.User { + return predicate.User(sql.FieldLT(FieldOutbox, v)) +} + +// OutboxLTE applies the LTE predicate on the "outbox" field. +func OutboxLTE(v string) predicate.User { + return predicate.User(sql.FieldLTE(FieldOutbox, v)) +} + +// OutboxContains applies the Contains predicate on the "outbox" field. +func OutboxContains(v string) predicate.User { + return predicate.User(sql.FieldContains(FieldOutbox, v)) +} + +// OutboxHasPrefix applies the HasPrefix predicate on the "outbox" field. +func OutboxHasPrefix(v string) predicate.User { + return predicate.User(sql.FieldHasPrefix(FieldOutbox, v)) +} + +// OutboxHasSuffix applies the HasSuffix predicate on the "outbox" field. +func OutboxHasSuffix(v string) predicate.User { + return predicate.User(sql.FieldHasSuffix(FieldOutbox, v)) +} + +// OutboxEqualFold applies the EqualFold predicate on the "outbox" field. +func OutboxEqualFold(v string) predicate.User { + return predicate.User(sql.FieldEqualFold(FieldOutbox, v)) +} + +// OutboxContainsFold applies the ContainsFold predicate on the "outbox" field. 
+func OutboxContainsFold(v string) predicate.User { + return predicate.User(sql.FieldContainsFold(FieldOutbox, v)) +} + +// HasAvatarImage applies the HasEdge predicate on the "avatarImage" edge. +func HasAvatarImage() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, AvatarImageTable, AvatarImageColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAvatarImageWith applies the HasEdge predicate on the "avatarImage" edge with a given conditions (other predicates). +func HasAvatarImageWith(preds ...predicate.Image) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newAvatarImageStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasHeaderImage applies the HasEdge predicate on the "headerImage" edge. +func HasHeaderImage() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, HeaderImageTable, HeaderImageColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasHeaderImageWith applies the HasEdge predicate on the "headerImage" edge with a given conditions (other predicates). +func HasHeaderImageWith(preds ...predicate.Image) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newHeaderImageStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasAuthoredNotes applies the HasEdge predicate on the "authoredNotes" edge. 
+func HasAuthoredNotes() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, AuthoredNotesTable, AuthoredNotesColumn), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasAuthoredNotesWith applies the HasEdge predicate on the "authoredNotes" edge with a given conditions (other predicates). +func HasAuthoredNotesWith(preds ...predicate.Note) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newAuthoredNotesStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// HasMentionedNotes applies the HasEdge predicate on the "mentionedNotes" edge. +func HasMentionedNotes() predicate.User { + return predicate.User(func(s *sql.Selector) { + step := sqlgraph.NewStep( + sqlgraph.From(Table, FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, MentionedNotesTable, MentionedNotesPrimaryKey...), + ) + sqlgraph.HasNeighbors(s, step) + }) +} + +// HasMentionedNotesWith applies the HasEdge predicate on the "mentionedNotes" edge with a given conditions (other predicates). +func HasMentionedNotesWith(preds ...predicate.Note) predicate.User { + return predicate.User(func(s *sql.Selector) { + step := newMentionedNotesStep() + sqlgraph.HasNeighborsWith(s, step, func(s *sql.Selector) { + for _, p := range preds { + p(s) + } + }) + }) +} + +// And groups predicates with the AND operator between them. +func And(predicates ...predicate.User) predicate.User { + return predicate.User(sql.AndPredicates(predicates...)) +} + +// Or groups predicates with the OR operator between them. +func Or(predicates ...predicate.User) predicate.User { + return predicate.User(sql.OrPredicates(predicates...)) +} + +// Not applies the not operator on the given predicate. 
+func Not(p predicate.User) predicate.User { + return predicate.User(sql.NotPredicates(p)) +} diff --git a/ent/user_create.go b/ent/user_create.go new file mode 100644 index 0000000..d6a4d44 --- /dev/null +++ b/ent/user_create.go @@ -0,0 +1,1752 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "crypto/ed25519" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect" + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/image" + "github.com/lysand-org/versia-go/ent/note" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// UserCreate is the builder for creating a User entity. +type UserCreate struct { + config + mutation *UserMutation + hooks []Hook + conflict []sql.ConflictOption +} + +// SetIsRemote sets the "isRemote" field. +func (uc *UserCreate) SetIsRemote(b bool) *UserCreate { + uc.mutation.SetIsRemote(b) + return uc +} + +// SetURI sets the "uri" field. +func (uc *UserCreate) SetURI(s string) *UserCreate { + uc.mutation.SetURI(s) + return uc +} + +// SetExtensions sets the "extensions" field. +func (uc *UserCreate) SetExtensions(l lysand.Extensions) *UserCreate { + uc.mutation.SetExtensions(l) + return uc +} + +// SetCreatedAt sets the "created_at" field. +func (uc *UserCreate) SetCreatedAt(t time.Time) *UserCreate { + uc.mutation.SetCreatedAt(t) + return uc +} + +// SetNillableCreatedAt sets the "created_at" field if the given value is not nil. +func (uc *UserCreate) SetNillableCreatedAt(t *time.Time) *UserCreate { + if t != nil { + uc.SetCreatedAt(*t) + } + return uc +} + +// SetUpdatedAt sets the "updated_at" field. +func (uc *UserCreate) SetUpdatedAt(t time.Time) *UserCreate { + uc.mutation.SetUpdatedAt(t) + return uc +} + +// SetNillableUpdatedAt sets the "updated_at" field if the given value is not nil. 
+func (uc *UserCreate) SetNillableUpdatedAt(t *time.Time) *UserCreate { + if t != nil { + uc.SetUpdatedAt(*t) + } + return uc +} + +// SetUsername sets the "username" field. +func (uc *UserCreate) SetUsername(s string) *UserCreate { + uc.mutation.SetUsername(s) + return uc +} + +// SetPasswordHash sets the "passwordHash" field. +func (uc *UserCreate) SetPasswordHash(b []byte) *UserCreate { + uc.mutation.SetPasswordHash(b) + return uc +} + +// SetDisplayName sets the "displayName" field. +func (uc *UserCreate) SetDisplayName(s string) *UserCreate { + uc.mutation.SetDisplayName(s) + return uc +} + +// SetNillableDisplayName sets the "displayName" field if the given value is not nil. +func (uc *UserCreate) SetNillableDisplayName(s *string) *UserCreate { + if s != nil { + uc.SetDisplayName(*s) + } + return uc +} + +// SetBiography sets the "biography" field. +func (uc *UserCreate) SetBiography(s string) *UserCreate { + uc.mutation.SetBiography(s) + return uc +} + +// SetNillableBiography sets the "biography" field if the given value is not nil. +func (uc *UserCreate) SetNillableBiography(s *string) *UserCreate { + if s != nil { + uc.SetBiography(*s) + } + return uc +} + +// SetPublicKey sets the "publicKey" field. +func (uc *UserCreate) SetPublicKey(ek ed25519.PublicKey) *UserCreate { + uc.mutation.SetPublicKey(ek) + return uc +} + +// SetPrivateKey sets the "privateKey" field. +func (uc *UserCreate) SetPrivateKey(ek ed25519.PrivateKey) *UserCreate { + uc.mutation.SetPrivateKey(ek) + return uc +} + +// SetIndexable sets the "indexable" field. +func (uc *UserCreate) SetIndexable(b bool) *UserCreate { + uc.mutation.SetIndexable(b) + return uc +} + +// SetNillableIndexable sets the "indexable" field if the given value is not nil. +func (uc *UserCreate) SetNillableIndexable(b *bool) *UserCreate { + if b != nil { + uc.SetIndexable(*b) + } + return uc +} + +// SetPrivacyLevel sets the "privacyLevel" field. 
+func (uc *UserCreate) SetPrivacyLevel(ul user.PrivacyLevel) *UserCreate { + uc.mutation.SetPrivacyLevel(ul) + return uc +} + +// SetNillablePrivacyLevel sets the "privacyLevel" field if the given value is not nil. +func (uc *UserCreate) SetNillablePrivacyLevel(ul *user.PrivacyLevel) *UserCreate { + if ul != nil { + uc.SetPrivacyLevel(*ul) + } + return uc +} + +// SetFields sets the "fields" field. +func (uc *UserCreate) SetFields(l []lysand.Field) *UserCreate { + uc.mutation.SetFields(l) + return uc +} + +// SetInbox sets the "inbox" field. +func (uc *UserCreate) SetInbox(s string) *UserCreate { + uc.mutation.SetInbox(s) + return uc +} + +// SetFeatured sets the "featured" field. +func (uc *UserCreate) SetFeatured(s string) *UserCreate { + uc.mutation.SetFeatured(s) + return uc +} + +// SetFollowers sets the "followers" field. +func (uc *UserCreate) SetFollowers(s string) *UserCreate { + uc.mutation.SetFollowers(s) + return uc +} + +// SetFollowing sets the "following" field. +func (uc *UserCreate) SetFollowing(s string) *UserCreate { + uc.mutation.SetFollowing(s) + return uc +} + +// SetOutbox sets the "outbox" field. +func (uc *UserCreate) SetOutbox(s string) *UserCreate { + uc.mutation.SetOutbox(s) + return uc +} + +// SetID sets the "id" field. +func (uc *UserCreate) SetID(u uuid.UUID) *UserCreate { + uc.mutation.SetID(u) + return uc +} + +// SetNillableID sets the "id" field if the given value is not nil. +func (uc *UserCreate) SetNillableID(u *uuid.UUID) *UserCreate { + if u != nil { + uc.SetID(*u) + } + return uc +} + +// SetAvatarImageID sets the "avatarImage" edge to the Image entity by ID. +func (uc *UserCreate) SetAvatarImageID(id int) *UserCreate { + uc.mutation.SetAvatarImageID(id) + return uc +} + +// SetNillableAvatarImageID sets the "avatarImage" edge to the Image entity by ID if the given value is not nil. 
+func (uc *UserCreate) SetNillableAvatarImageID(id *int) *UserCreate { + if id != nil { + uc = uc.SetAvatarImageID(*id) + } + return uc +} + +// SetAvatarImage sets the "avatarImage" edge to the Image entity. +func (uc *UserCreate) SetAvatarImage(i *Image) *UserCreate { + return uc.SetAvatarImageID(i.ID) +} + +// SetHeaderImageID sets the "headerImage" edge to the Image entity by ID. +func (uc *UserCreate) SetHeaderImageID(id int) *UserCreate { + uc.mutation.SetHeaderImageID(id) + return uc +} + +// SetNillableHeaderImageID sets the "headerImage" edge to the Image entity by ID if the given value is not nil. +func (uc *UserCreate) SetNillableHeaderImageID(id *int) *UserCreate { + if id != nil { + uc = uc.SetHeaderImageID(*id) + } + return uc +} + +// SetHeaderImage sets the "headerImage" edge to the Image entity. +func (uc *UserCreate) SetHeaderImage(i *Image) *UserCreate { + return uc.SetHeaderImageID(i.ID) +} + +// AddAuthoredNoteIDs adds the "authoredNotes" edge to the Note entity by IDs. +func (uc *UserCreate) AddAuthoredNoteIDs(ids ...uuid.UUID) *UserCreate { + uc.mutation.AddAuthoredNoteIDs(ids...) + return uc +} + +// AddAuthoredNotes adds the "authoredNotes" edges to the Note entity. +func (uc *UserCreate) AddAuthoredNotes(n ...*Note) *UserCreate { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uc.AddAuthoredNoteIDs(ids...) +} + +// AddMentionedNoteIDs adds the "mentionedNotes" edge to the Note entity by IDs. +func (uc *UserCreate) AddMentionedNoteIDs(ids ...uuid.UUID) *UserCreate { + uc.mutation.AddMentionedNoteIDs(ids...) + return uc +} + +// AddMentionedNotes adds the "mentionedNotes" edges to the Note entity. +func (uc *UserCreate) AddMentionedNotes(n ...*Note) *UserCreate { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uc.AddMentionedNoteIDs(ids...) +} + +// Mutation returns the UserMutation object of the builder. 
+func (uc *UserCreate) Mutation() *UserMutation { + return uc.mutation +} + +// Save creates the User in the database. +func (uc *UserCreate) Save(ctx context.Context) (*User, error) { + uc.defaults() + return withHooks(ctx, uc.sqlSave, uc.mutation, uc.hooks) +} + +// SaveX calls Save and panics if Save returns an error. +func (uc *UserCreate) SaveX(ctx context.Context) *User { + v, err := uc.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. +func (uc *UserCreate) Exec(ctx context.Context) error { + _, err := uc.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (uc *UserCreate) ExecX(ctx context.Context) { + if err := uc.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (uc *UserCreate) defaults() { + if _, ok := uc.mutation.Extensions(); !ok { + v := user.DefaultExtensions + uc.mutation.SetExtensions(v) + } + if _, ok := uc.mutation.CreatedAt(); !ok { + v := user.DefaultCreatedAt() + uc.mutation.SetCreatedAt(v) + } + if _, ok := uc.mutation.UpdatedAt(); !ok { + v := user.DefaultUpdatedAt() + uc.mutation.SetUpdatedAt(v) + } + if _, ok := uc.mutation.Indexable(); !ok { + v := user.DefaultIndexable + uc.mutation.SetIndexable(v) + } + if _, ok := uc.mutation.PrivacyLevel(); !ok { + v := user.DefaultPrivacyLevel + uc.mutation.SetPrivacyLevel(v) + } + if _, ok := uc.mutation.GetFields(); !ok { + v := user.DefaultFields + uc.mutation.SetFields(v) + } + if _, ok := uc.mutation.ID(); !ok { + v := user.DefaultID() + uc.mutation.SetID(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (uc *UserCreate) check() error { + if _, ok := uc.mutation.IsRemote(); !ok { + return &ValidationError{Name: "isRemote", err: errors.New(`ent: missing required field "User.isRemote"`)} + } + if _, ok := uc.mutation.URI(); !ok { + return &ValidationError{Name: "uri", err: errors.New(`ent: missing required field "User.uri"`)} + } + if v, ok := uc.mutation.URI(); ok { + if err := user.URIValidator(v); err != nil { + return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "User.uri": %w`, err)} + } + } + if _, ok := uc.mutation.Extensions(); !ok { + return &ValidationError{Name: "extensions", err: errors.New(`ent: missing required field "User.extensions"`)} + } + if _, ok := uc.mutation.CreatedAt(); !ok { + return &ValidationError{Name: "created_at", err: errors.New(`ent: missing required field "User.created_at"`)} + } + if _, ok := uc.mutation.UpdatedAt(); !ok { + return &ValidationError{Name: "updated_at", err: errors.New(`ent: missing required field "User.updated_at"`)} + } + if _, ok := uc.mutation.Username(); !ok { + return &ValidationError{Name: "username", err: errors.New(`ent: missing required field "User.username"`)} + } + if v, ok := uc.mutation.Username(); ok { + if err := user.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "User.username": %w`, err)} + } + } + if v, ok := uc.mutation.DisplayName(); ok { + if err := user.DisplayNameValidator(v); err != nil { + return &ValidationError{Name: "displayName", err: fmt.Errorf(`ent: validator failed for field "User.displayName": %w`, err)} + } + } + if _, ok := uc.mutation.PublicKey(); !ok { + return &ValidationError{Name: "publicKey", err: errors.New(`ent: missing required field "User.publicKey"`)} + } + if _, ok := uc.mutation.Indexable(); !ok { + return &ValidationError{Name: "indexable", err: errors.New(`ent: missing required field "User.indexable"`)} + } + if _, ok := uc.mutation.PrivacyLevel(); 
!ok { + return &ValidationError{Name: "privacyLevel", err: errors.New(`ent: missing required field "User.privacyLevel"`)} + } + if v, ok := uc.mutation.PrivacyLevel(); ok { + if err := user.PrivacyLevelValidator(v); err != nil { + return &ValidationError{Name: "privacyLevel", err: fmt.Errorf(`ent: validator failed for field "User.privacyLevel": %w`, err)} + } + } + if _, ok := uc.mutation.GetFields(); !ok { + return &ValidationError{Name: "fields", err: errors.New(`ent: missing required field "User.fields"`)} + } + if _, ok := uc.mutation.Inbox(); !ok { + return &ValidationError{Name: "inbox", err: errors.New(`ent: missing required field "User.inbox"`)} + } + if v, ok := uc.mutation.Inbox(); ok { + if err := user.InboxValidator(v); err != nil { + return &ValidationError{Name: "inbox", err: fmt.Errorf(`ent: validator failed for field "User.inbox": %w`, err)} + } + } + if _, ok := uc.mutation.Featured(); !ok { + return &ValidationError{Name: "featured", err: errors.New(`ent: missing required field "User.featured"`)} + } + if v, ok := uc.mutation.Featured(); ok { + if err := user.FeaturedValidator(v); err != nil { + return &ValidationError{Name: "featured", err: fmt.Errorf(`ent: validator failed for field "User.featured": %w`, err)} + } + } + if _, ok := uc.mutation.Followers(); !ok { + return &ValidationError{Name: "followers", err: errors.New(`ent: missing required field "User.followers"`)} + } + if v, ok := uc.mutation.Followers(); ok { + if err := user.FollowersValidator(v); err != nil { + return &ValidationError{Name: "followers", err: fmt.Errorf(`ent: validator failed for field "User.followers": %w`, err)} + } + } + if _, ok := uc.mutation.Following(); !ok { + return &ValidationError{Name: "following", err: errors.New(`ent: missing required field "User.following"`)} + } + if v, ok := uc.mutation.Following(); ok { + if err := user.FollowingValidator(v); err != nil { + return &ValidationError{Name: "following", err: fmt.Errorf(`ent: validator failed for field 
"User.following": %w`, err)} + } + } + if _, ok := uc.mutation.Outbox(); !ok { + return &ValidationError{Name: "outbox", err: errors.New(`ent: missing required field "User.outbox"`)} + } + if v, ok := uc.mutation.Outbox(); ok { + if err := user.OutboxValidator(v); err != nil { + return &ValidationError{Name: "outbox", err: fmt.Errorf(`ent: validator failed for field "User.outbox": %w`, err)} + } + } + return nil +} + +func (uc *UserCreate) sqlSave(ctx context.Context) (*User, error) { + if err := uc.check(); err != nil { + return nil, err + } + _node, _spec := uc.createSpec() + if err := sqlgraph.CreateNode(ctx, uc.driver, _spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + if _spec.ID.Value != nil { + if id, ok := _spec.ID.Value.(*uuid.UUID); ok { + _node.ID = *id + } else if err := _node.ID.Scan(_spec.ID.Value); err != nil { + return nil, err + } + } + uc.mutation.id = &_node.ID + uc.mutation.done = true + return _node, nil +} + +func (uc *UserCreate) createSpec() (*User, *sqlgraph.CreateSpec) { + var ( + _node = &User{config: uc.config} + _spec = sqlgraph.NewCreateSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) + ) + _spec.OnConflict = uc.conflict + if id, ok := uc.mutation.ID(); ok { + _node.ID = id + _spec.ID.Value = &id + } + if value, ok := uc.mutation.IsRemote(); ok { + _spec.SetField(user.FieldIsRemote, field.TypeBool, value) + _node.IsRemote = value + } + if value, ok := uc.mutation.URI(); ok { + _spec.SetField(user.FieldURI, field.TypeString, value) + _node.URI = value + } + if value, ok := uc.mutation.Extensions(); ok { + _spec.SetField(user.FieldExtensions, field.TypeJSON, value) + _node.Extensions = value + } + if value, ok := uc.mutation.CreatedAt(); ok { + _spec.SetField(user.FieldCreatedAt, field.TypeTime, value) + _node.CreatedAt = value + } + if value, ok := uc.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, 
field.TypeTime, value) + _node.UpdatedAt = value + } + if value, ok := uc.mutation.Username(); ok { + _spec.SetField(user.FieldUsername, field.TypeString, value) + _node.Username = value + } + if value, ok := uc.mutation.PasswordHash(); ok { + _spec.SetField(user.FieldPasswordHash, field.TypeBytes, value) + _node.PasswordHash = &value + } + if value, ok := uc.mutation.DisplayName(); ok { + _spec.SetField(user.FieldDisplayName, field.TypeString, value) + _node.DisplayName = &value + } + if value, ok := uc.mutation.Biography(); ok { + _spec.SetField(user.FieldBiography, field.TypeString, value) + _node.Biography = &value + } + if value, ok := uc.mutation.PublicKey(); ok { + _spec.SetField(user.FieldPublicKey, field.TypeBytes, value) + _node.PublicKey = value + } + if value, ok := uc.mutation.PrivateKey(); ok { + _spec.SetField(user.FieldPrivateKey, field.TypeBytes, value) + _node.PrivateKey = value + } + if value, ok := uc.mutation.Indexable(); ok { + _spec.SetField(user.FieldIndexable, field.TypeBool, value) + _node.Indexable = value + } + if value, ok := uc.mutation.PrivacyLevel(); ok { + _spec.SetField(user.FieldPrivacyLevel, field.TypeEnum, value) + _node.PrivacyLevel = value + } + if value, ok := uc.mutation.GetFields(); ok { + _spec.SetField(user.FieldFields, field.TypeJSON, value) + _node.Fields = value + } + if value, ok := uc.mutation.Inbox(); ok { + _spec.SetField(user.FieldInbox, field.TypeString, value) + _node.Inbox = value + } + if value, ok := uc.mutation.Featured(); ok { + _spec.SetField(user.FieldFeatured, field.TypeString, value) + _node.Featured = value + } + if value, ok := uc.mutation.Followers(); ok { + _spec.SetField(user.FieldFollowers, field.TypeString, value) + _node.Followers = value + } + if value, ok := uc.mutation.Following(); ok { + _spec.SetField(user.FieldFollowing, field.TypeString, value) + _node.Following = value + } + if value, ok := uc.mutation.Outbox(); ok { + _spec.SetField(user.FieldOutbox, field.TypeString, value) + 
_node.Outbox = value + } + if nodes := uc.mutation.AvatarImageIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: user.AvatarImageTable, + Columns: []string{user.AvatarImageColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.user_avatar_image = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := uc.mutation.HeaderImageIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: user.HeaderImageTable, + Columns: []string{user.HeaderImageColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _node.user_header_image = &nodes[0] + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := uc.mutation.AuthoredNotesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: user.AuthoredNotesTable, + Columns: []string{user.AuthoredNotesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + if nodes := uc.mutation.MentionedNotesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: user.MentionedNotesTable, + Columns: user.MentionedNotesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges = append(_spec.Edges, edge) + } + return _node, _spec +} + +// OnConflict allows configuring the `ON 
CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.User.Create(). +// SetIsRemote(v). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UserUpsert) { +// SetIsRemote(v+v). +// }). +// Exec(ctx) +func (uc *UserCreate) OnConflict(opts ...sql.ConflictOption) *UserUpsertOne { + uc.conflict = opts + return &UserUpsertOne{ + create: uc, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (uc *UserCreate) OnConflictColumns(columns ...string) *UserUpsertOne { + uc.conflict = append(uc.conflict, sql.ConflictColumns(columns...)) + return &UserUpsertOne{ + create: uc, + } +} + +type ( + // UserUpsertOne is the builder for "upsert"-ing + // one User node. + UserUpsertOne struct { + create *UserCreate + } + + // UserUpsert is the "OnConflict" setter. + UserUpsert struct { + *sql.UpdateSet + } +) + +// SetIsRemote sets the "isRemote" field. +func (u *UserUpsert) SetIsRemote(v bool) *UserUpsert { + u.Set(user.FieldIsRemote, v) + return u +} + +// UpdateIsRemote sets the "isRemote" field to the value that was provided on create. +func (u *UserUpsert) UpdateIsRemote() *UserUpsert { + u.SetExcluded(user.FieldIsRemote) + return u +} + +// SetURI sets the "uri" field. +func (u *UserUpsert) SetURI(v string) *UserUpsert { + u.Set(user.FieldURI, v) + return u +} + +// UpdateURI sets the "uri" field to the value that was provided on create. +func (u *UserUpsert) UpdateURI() *UserUpsert { + u.SetExcluded(user.FieldURI) + return u +} + +// SetExtensions sets the "extensions" field. 
+func (u *UserUpsert) SetExtensions(v lysand.Extensions) *UserUpsert { + u.Set(user.FieldExtensions, v) + return u +} + +// UpdateExtensions sets the "extensions" field to the value that was provided on create. +func (u *UserUpsert) UpdateExtensions() *UserUpsert { + u.SetExcluded(user.FieldExtensions) + return u +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserUpsert) SetUpdatedAt(v time.Time) *UserUpsert { + u.Set(user.FieldUpdatedAt, v) + return u +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserUpsert) UpdateUpdatedAt() *UserUpsert { + u.SetExcluded(user.FieldUpdatedAt) + return u +} + +// SetUsername sets the "username" field. +func (u *UserUpsert) SetUsername(v string) *UserUpsert { + u.Set(user.FieldUsername, v) + return u +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *UserUpsert) UpdateUsername() *UserUpsert { + u.SetExcluded(user.FieldUsername) + return u +} + +// SetPasswordHash sets the "passwordHash" field. +func (u *UserUpsert) SetPasswordHash(v []byte) *UserUpsert { + u.Set(user.FieldPasswordHash, v) + return u +} + +// UpdatePasswordHash sets the "passwordHash" field to the value that was provided on create. +func (u *UserUpsert) UpdatePasswordHash() *UserUpsert { + u.SetExcluded(user.FieldPasswordHash) + return u +} + +// ClearPasswordHash clears the value of the "passwordHash" field. +func (u *UserUpsert) ClearPasswordHash() *UserUpsert { + u.SetNull(user.FieldPasswordHash) + return u +} + +// SetDisplayName sets the "displayName" field. +func (u *UserUpsert) SetDisplayName(v string) *UserUpsert { + u.Set(user.FieldDisplayName, v) + return u +} + +// UpdateDisplayName sets the "displayName" field to the value that was provided on create. +func (u *UserUpsert) UpdateDisplayName() *UserUpsert { + u.SetExcluded(user.FieldDisplayName) + return u +} + +// ClearDisplayName clears the value of the "displayName" field. 
+func (u *UserUpsert) ClearDisplayName() *UserUpsert { + u.SetNull(user.FieldDisplayName) + return u +} + +// SetBiography sets the "biography" field. +func (u *UserUpsert) SetBiography(v string) *UserUpsert { + u.Set(user.FieldBiography, v) + return u +} + +// UpdateBiography sets the "biography" field to the value that was provided on create. +func (u *UserUpsert) UpdateBiography() *UserUpsert { + u.SetExcluded(user.FieldBiography) + return u +} + +// ClearBiography clears the value of the "biography" field. +func (u *UserUpsert) ClearBiography() *UserUpsert { + u.SetNull(user.FieldBiography) + return u +} + +// SetPublicKey sets the "publicKey" field. +func (u *UserUpsert) SetPublicKey(v ed25519.PublicKey) *UserUpsert { + u.Set(user.FieldPublicKey, v) + return u +} + +// UpdatePublicKey sets the "publicKey" field to the value that was provided on create. +func (u *UserUpsert) UpdatePublicKey() *UserUpsert { + u.SetExcluded(user.FieldPublicKey) + return u +} + +// SetPrivateKey sets the "privateKey" field. +func (u *UserUpsert) SetPrivateKey(v ed25519.PrivateKey) *UserUpsert { + u.Set(user.FieldPrivateKey, v) + return u +} + +// UpdatePrivateKey sets the "privateKey" field to the value that was provided on create. +func (u *UserUpsert) UpdatePrivateKey() *UserUpsert { + u.SetExcluded(user.FieldPrivateKey) + return u +} + +// ClearPrivateKey clears the value of the "privateKey" field. +func (u *UserUpsert) ClearPrivateKey() *UserUpsert { + u.SetNull(user.FieldPrivateKey) + return u +} + +// SetIndexable sets the "indexable" field. +func (u *UserUpsert) SetIndexable(v bool) *UserUpsert { + u.Set(user.FieldIndexable, v) + return u +} + +// UpdateIndexable sets the "indexable" field to the value that was provided on create. +func (u *UserUpsert) UpdateIndexable() *UserUpsert { + u.SetExcluded(user.FieldIndexable) + return u +} + +// SetPrivacyLevel sets the "privacyLevel" field. 
+func (u *UserUpsert) SetPrivacyLevel(v user.PrivacyLevel) *UserUpsert { + u.Set(user.FieldPrivacyLevel, v) + return u +} + +// UpdatePrivacyLevel sets the "privacyLevel" field to the value that was provided on create. +func (u *UserUpsert) UpdatePrivacyLevel() *UserUpsert { + u.SetExcluded(user.FieldPrivacyLevel) + return u +} + +// SetFields sets the "fields" field. +func (u *UserUpsert) SetFields(v []lysand.Field) *UserUpsert { + u.Set(user.FieldFields, v) + return u +} + +// UpdateFields sets the "fields" field to the value that was provided on create. +func (u *UserUpsert) UpdateFields() *UserUpsert { + u.SetExcluded(user.FieldFields) + return u +} + +// SetInbox sets the "inbox" field. +func (u *UserUpsert) SetInbox(v string) *UserUpsert { + u.Set(user.FieldInbox, v) + return u +} + +// UpdateInbox sets the "inbox" field to the value that was provided on create. +func (u *UserUpsert) UpdateInbox() *UserUpsert { + u.SetExcluded(user.FieldInbox) + return u +} + +// SetFeatured sets the "featured" field. +func (u *UserUpsert) SetFeatured(v string) *UserUpsert { + u.Set(user.FieldFeatured, v) + return u +} + +// UpdateFeatured sets the "featured" field to the value that was provided on create. +func (u *UserUpsert) UpdateFeatured() *UserUpsert { + u.SetExcluded(user.FieldFeatured) + return u +} + +// SetFollowers sets the "followers" field. +func (u *UserUpsert) SetFollowers(v string) *UserUpsert { + u.Set(user.FieldFollowers, v) + return u +} + +// UpdateFollowers sets the "followers" field to the value that was provided on create. +func (u *UserUpsert) UpdateFollowers() *UserUpsert { + u.SetExcluded(user.FieldFollowers) + return u +} + +// SetFollowing sets the "following" field. +func (u *UserUpsert) SetFollowing(v string) *UserUpsert { + u.Set(user.FieldFollowing, v) + return u +} + +// UpdateFollowing sets the "following" field to the value that was provided on create. 
+func (u *UserUpsert) UpdateFollowing() *UserUpsert { + u.SetExcluded(user.FieldFollowing) + return u +} + +// SetOutbox sets the "outbox" field. +func (u *UserUpsert) SetOutbox(v string) *UserUpsert { + u.Set(user.FieldOutbox, v) + return u +} + +// UpdateOutbox sets the "outbox" field to the value that was provided on create. +func (u *UserUpsert) UpdateOutbox() *UserUpsert { + u.SetExcluded(user.FieldOutbox) + return u +} + +// UpdateNewValues updates the mutable fields using the new values that were set on create except the ID field. +// Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(user.FieldID) +// }), +// ). +// Exec(ctx) +func (u *UserUpsertOne) UpdateNewValues() *UserUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + if _, exists := u.create.mutation.ID(); exists { + s.SetIgnore(user.FieldID) + } + if _, exists := u.create.mutation.CreatedAt(); exists { + s.SetIgnore(user.FieldCreatedAt) + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserUpsertOne) Ignore() *UserUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserUpsertOne) DoNothing() *UserUpsertOne { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserCreate.OnConflict +// documentation for more info. 
+func (u *UserUpsertOne) Update(set func(*UserUpsert)) *UserUpsertOne { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserUpsert{UpdateSet: update}) + })) + return u +} + +// SetIsRemote sets the "isRemote" field. +func (u *UserUpsertOne) SetIsRemote(v bool) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetIsRemote(v) + }) +} + +// UpdateIsRemote sets the "isRemote" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateIsRemote() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateIsRemote() + }) +} + +// SetURI sets the "uri" field. +func (u *UserUpsertOne) SetURI(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetURI(v) + }) +} + +// UpdateURI sets the "uri" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateURI() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateURI() + }) +} + +// SetExtensions sets the "extensions" field. +func (u *UserUpsertOne) SetExtensions(v lysand.Extensions) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetExtensions(v) + }) +} + +// UpdateExtensions sets the "extensions" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateExtensions() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateExtensions() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserUpsertOne) SetUpdatedAt(v time.Time) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateUpdatedAt() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetUsername sets the "username" field. 
+func (u *UserUpsertOne) SetUsername(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetUsername(v) + }) +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateUsername() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateUsername() + }) +} + +// SetPasswordHash sets the "passwordHash" field. +func (u *UserUpsertOne) SetPasswordHash(v []byte) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetPasswordHash(v) + }) +} + +// UpdatePasswordHash sets the "passwordHash" field to the value that was provided on create. +func (u *UserUpsertOne) UpdatePasswordHash() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdatePasswordHash() + }) +} + +// ClearPasswordHash clears the value of the "passwordHash" field. +func (u *UserUpsertOne) ClearPasswordHash() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.ClearPasswordHash() + }) +} + +// SetDisplayName sets the "displayName" field. +func (u *UserUpsertOne) SetDisplayName(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetDisplayName(v) + }) +} + +// UpdateDisplayName sets the "displayName" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateDisplayName() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateDisplayName() + }) +} + +// ClearDisplayName clears the value of the "displayName" field. +func (u *UserUpsertOne) ClearDisplayName() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.ClearDisplayName() + }) +} + +// SetBiography sets the "biography" field. +func (u *UserUpsertOne) SetBiography(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetBiography(v) + }) +} + +// UpdateBiography sets the "biography" field to the value that was provided on create. 
+func (u *UserUpsertOne) UpdateBiography() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateBiography() + }) +} + +// ClearBiography clears the value of the "biography" field. +func (u *UserUpsertOne) ClearBiography() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.ClearBiography() + }) +} + +// SetPublicKey sets the "publicKey" field. +func (u *UserUpsertOne) SetPublicKey(v ed25519.PublicKey) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetPublicKey(v) + }) +} + +// UpdatePublicKey sets the "publicKey" field to the value that was provided on create. +func (u *UserUpsertOne) UpdatePublicKey() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdatePublicKey() + }) +} + +// SetPrivateKey sets the "privateKey" field. +func (u *UserUpsertOne) SetPrivateKey(v ed25519.PrivateKey) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetPrivateKey(v) + }) +} + +// UpdatePrivateKey sets the "privateKey" field to the value that was provided on create. +func (u *UserUpsertOne) UpdatePrivateKey() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdatePrivateKey() + }) +} + +// ClearPrivateKey clears the value of the "privateKey" field. +func (u *UserUpsertOne) ClearPrivateKey() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.ClearPrivateKey() + }) +} + +// SetIndexable sets the "indexable" field. +func (u *UserUpsertOne) SetIndexable(v bool) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetIndexable(v) + }) +} + +// UpdateIndexable sets the "indexable" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateIndexable() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateIndexable() + }) +} + +// SetPrivacyLevel sets the "privacyLevel" field. 
+func (u *UserUpsertOne) SetPrivacyLevel(v user.PrivacyLevel) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetPrivacyLevel(v) + }) +} + +// UpdatePrivacyLevel sets the "privacyLevel" field to the value that was provided on create. +func (u *UserUpsertOne) UpdatePrivacyLevel() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdatePrivacyLevel() + }) +} + +// SetFields sets the "fields" field. +func (u *UserUpsertOne) SetFields(v []lysand.Field) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetFields(v) + }) +} + +// UpdateFields sets the "fields" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateFields() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateFields() + }) +} + +// SetInbox sets the "inbox" field. +func (u *UserUpsertOne) SetInbox(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetInbox(v) + }) +} + +// UpdateInbox sets the "inbox" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateInbox() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateInbox() + }) +} + +// SetFeatured sets the "featured" field. +func (u *UserUpsertOne) SetFeatured(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetFeatured(v) + }) +} + +// UpdateFeatured sets the "featured" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateFeatured() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateFeatured() + }) +} + +// SetFollowers sets the "followers" field. +func (u *UserUpsertOne) SetFollowers(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetFollowers(v) + }) +} + +// UpdateFollowers sets the "followers" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateFollowers() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateFollowers() + }) +} + +// SetFollowing sets the "following" field. 
+func (u *UserUpsertOne) SetFollowing(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetFollowing(v) + }) +} + +// UpdateFollowing sets the "following" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateFollowing() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateFollowing() + }) +} + +// SetOutbox sets the "outbox" field. +func (u *UserUpsertOne) SetOutbox(v string) *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.SetOutbox(v) + }) +} + +// UpdateOutbox sets the "outbox" field to the value that was provided on create. +func (u *UserUpsertOne) UpdateOutbox() *UserUpsertOne { + return u.Update(func(s *UserUpsert) { + s.UpdateOutbox() + }) +} + +// Exec executes the query. +func (u *UserUpsertOne) Exec(ctx context.Context) error { + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserCreate.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UserUpsertOne) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} + +// Exec executes the UPSERT query and returns the inserted/updated ID. +func (u *UserUpsertOne) ID(ctx context.Context) (id uuid.UUID, err error) { + if u.create.driver.Dialect() == dialect.MySQL { + // In case of "ON CONFLICT", there is no way to get back non-numeric ID + // fields from the database since MySQL does not support the RETURNING clause. + return id, errors.New("ent: UserUpsertOne.ID is not supported by MySQL driver. Use UserUpsertOne.Exec instead") + } + node, err := u.create.Save(ctx) + if err != nil { + return id, err + } + return node.ID, nil +} + +// IDX is like ID, but panics if an error occurs. +func (u *UserUpsertOne) IDX(ctx context.Context) uuid.UUID { + id, err := u.ID(ctx) + if err != nil { + panic(err) + } + return id +} + +// UserCreateBulk is the builder for creating many User entities in bulk. 
+type UserCreateBulk struct { + config + err error + builders []*UserCreate + conflict []sql.ConflictOption +} + +// Save creates the User entities in the database. +func (ucb *UserCreateBulk) Save(ctx context.Context) ([]*User, error) { + if ucb.err != nil { + return nil, ucb.err + } + specs := make([]*sqlgraph.CreateSpec, len(ucb.builders)) + nodes := make([]*User, len(ucb.builders)) + mutators := make([]Mutator, len(ucb.builders)) + for i := range ucb.builders { + func(i int, root context.Context) { + builder := ucb.builders[i] + builder.defaults() + var mut Mutator = MutateFunc(func(ctx context.Context, m Mutation) (Value, error) { + mutation, ok := m.(*UserMutation) + if !ok { + return nil, fmt.Errorf("unexpected mutation type %T", m) + } + if err := builder.check(); err != nil { + return nil, err + } + builder.mutation = mutation + var err error + nodes[i], specs[i] = builder.createSpec() + if i < len(mutators)-1 { + _, err = mutators[i+1].Mutate(root, ucb.builders[i+1].mutation) + } else { + spec := &sqlgraph.BatchCreateSpec{Nodes: specs} + spec.OnConflict = ucb.conflict + // Invoke the actual operation on the latest mutation in the chain. + if err = sqlgraph.BatchCreate(ctx, ucb.driver, spec); err != nil { + if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + } + } + if err != nil { + return nil, err + } + mutation.id = &nodes[i].ID + mutation.done = true + return nodes[i], nil + }) + for i := len(builder.hooks) - 1; i >= 0; i-- { + mut = builder.hooks[i](mut) + } + mutators[i] = mut + }(i, ctx) + } + if len(mutators) > 0 { + if _, err := mutators[0].Mutate(ctx, ucb.builders[0].mutation); err != nil { + return nil, err + } + } + return nodes, nil +} + +// SaveX is like Save, but panics if an error occurs. +func (ucb *UserCreateBulk) SaveX(ctx context.Context) []*User { + v, err := ucb.Save(ctx) + if err != nil { + panic(err) + } + return v +} + +// Exec executes the query. 
+func (ucb *UserCreateBulk) Exec(ctx context.Context) error { + _, err := ucb.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (ucb *UserCreateBulk) ExecX(ctx context.Context) { + if err := ucb.Exec(ctx); err != nil { + panic(err) + } +} + +// OnConflict allows configuring the `ON CONFLICT` / `ON DUPLICATE KEY` clause +// of the `INSERT` statement. For example: +// +// client.User.CreateBulk(builders...). +// OnConflict( +// // Update the row with the new values +// // the was proposed for insertion. +// sql.ResolveWithNewValues(), +// ). +// // Override some of the fields with custom +// // update values. +// Update(func(u *ent.UserUpsert) { +// SetIsRemote(v+v). +// }). +// Exec(ctx) +func (ucb *UserCreateBulk) OnConflict(opts ...sql.ConflictOption) *UserUpsertBulk { + ucb.conflict = opts + return &UserUpsertBulk{ + create: ucb, + } +} + +// OnConflictColumns calls `OnConflict` and configures the columns +// as conflict target. Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict(sql.ConflictColumns(columns...)). +// Exec(ctx) +func (ucb *UserCreateBulk) OnConflictColumns(columns ...string) *UserUpsertBulk { + ucb.conflict = append(ucb.conflict, sql.ConflictColumns(columns...)) + return &UserUpsertBulk{ + create: ucb, + } +} + +// UserUpsertBulk is the builder for "upsert"-ing +// a bulk of User nodes. +type UserUpsertBulk struct { + create *UserCreateBulk +} + +// UpdateNewValues updates the mutable fields using the new values that +// were set on create. Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict( +// sql.ResolveWithNewValues(), +// sql.ResolveWith(func(u *sql.UpdateSet) { +// u.SetIgnore(user.FieldID) +// }), +// ). 
+// Exec(ctx) +func (u *UserUpsertBulk) UpdateNewValues() *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithNewValues()) + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(s *sql.UpdateSet) { + for _, b := range u.create.builders { + if _, exists := b.mutation.ID(); exists { + s.SetIgnore(user.FieldID) + } + if _, exists := b.mutation.CreatedAt(); exists { + s.SetIgnore(user.FieldCreatedAt) + } + } + })) + return u +} + +// Ignore sets each column to itself in case of conflict. +// Using this option is equivalent to using: +// +// client.User.Create(). +// OnConflict(sql.ResolveWithIgnore()). +// Exec(ctx) +func (u *UserUpsertBulk) Ignore() *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWithIgnore()) + return u +} + +// DoNothing configures the conflict_action to `DO NOTHING`. +// Supported only by SQLite and PostgreSQL. +func (u *UserUpsertBulk) DoNothing() *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.DoNothing()) + return u +} + +// Update allows overriding fields `UPDATE` values. See the UserCreateBulk.OnConflict +// documentation for more info. +func (u *UserUpsertBulk) Update(set func(*UserUpsert)) *UserUpsertBulk { + u.create.conflict = append(u.create.conflict, sql.ResolveWith(func(update *sql.UpdateSet) { + set(&UserUpsert{UpdateSet: update}) + })) + return u +} + +// SetIsRemote sets the "isRemote" field. +func (u *UserUpsertBulk) SetIsRemote(v bool) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetIsRemote(v) + }) +} + +// UpdateIsRemote sets the "isRemote" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateIsRemote() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateIsRemote() + }) +} + +// SetURI sets the "uri" field. 
+func (u *UserUpsertBulk) SetURI(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetURI(v) + }) +} + +// UpdateURI sets the "uri" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateURI() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateURI() + }) +} + +// SetExtensions sets the "extensions" field. +func (u *UserUpsertBulk) SetExtensions(v lysand.Extensions) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetExtensions(v) + }) +} + +// UpdateExtensions sets the "extensions" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateExtensions() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateExtensions() + }) +} + +// SetUpdatedAt sets the "updated_at" field. +func (u *UserUpsertBulk) SetUpdatedAt(v time.Time) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetUpdatedAt(v) + }) +} + +// UpdateUpdatedAt sets the "updated_at" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateUpdatedAt() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateUpdatedAt() + }) +} + +// SetUsername sets the "username" field. +func (u *UserUpsertBulk) SetUsername(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetUsername(v) + }) +} + +// UpdateUsername sets the "username" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateUsername() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateUsername() + }) +} + +// SetPasswordHash sets the "passwordHash" field. +func (u *UserUpsertBulk) SetPasswordHash(v []byte) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetPasswordHash(v) + }) +} + +// UpdatePasswordHash sets the "passwordHash" field to the value that was provided on create. 
+func (u *UserUpsertBulk) UpdatePasswordHash() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdatePasswordHash() + }) +} + +// ClearPasswordHash clears the value of the "passwordHash" field. +func (u *UserUpsertBulk) ClearPasswordHash() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.ClearPasswordHash() + }) +} + +// SetDisplayName sets the "displayName" field. +func (u *UserUpsertBulk) SetDisplayName(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetDisplayName(v) + }) +} + +// UpdateDisplayName sets the "displayName" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateDisplayName() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateDisplayName() + }) +} + +// ClearDisplayName clears the value of the "displayName" field. +func (u *UserUpsertBulk) ClearDisplayName() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.ClearDisplayName() + }) +} + +// SetBiography sets the "biography" field. +func (u *UserUpsertBulk) SetBiography(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetBiography(v) + }) +} + +// UpdateBiography sets the "biography" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateBiography() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateBiography() + }) +} + +// ClearBiography clears the value of the "biography" field. +func (u *UserUpsertBulk) ClearBiography() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.ClearBiography() + }) +} + +// SetPublicKey sets the "publicKey" field. +func (u *UserUpsertBulk) SetPublicKey(v ed25519.PublicKey) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetPublicKey(v) + }) +} + +// UpdatePublicKey sets the "publicKey" field to the value that was provided on create. 
+func (u *UserUpsertBulk) UpdatePublicKey() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdatePublicKey() + }) +} + +// SetPrivateKey sets the "privateKey" field. +func (u *UserUpsertBulk) SetPrivateKey(v ed25519.PrivateKey) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetPrivateKey(v) + }) +} + +// UpdatePrivateKey sets the "privateKey" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdatePrivateKey() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdatePrivateKey() + }) +} + +// ClearPrivateKey clears the value of the "privateKey" field. +func (u *UserUpsertBulk) ClearPrivateKey() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.ClearPrivateKey() + }) +} + +// SetIndexable sets the "indexable" field. +func (u *UserUpsertBulk) SetIndexable(v bool) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetIndexable(v) + }) +} + +// UpdateIndexable sets the "indexable" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateIndexable() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateIndexable() + }) +} + +// SetPrivacyLevel sets the "privacyLevel" field. +func (u *UserUpsertBulk) SetPrivacyLevel(v user.PrivacyLevel) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetPrivacyLevel(v) + }) +} + +// UpdatePrivacyLevel sets the "privacyLevel" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdatePrivacyLevel() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdatePrivacyLevel() + }) +} + +// SetFields sets the "fields" field. +func (u *UserUpsertBulk) SetFields(v []lysand.Field) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetFields(v) + }) +} + +// UpdateFields sets the "fields" field to the value that was provided on create. 
+func (u *UserUpsertBulk) UpdateFields() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateFields() + }) +} + +// SetInbox sets the "inbox" field. +func (u *UserUpsertBulk) SetInbox(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetInbox(v) + }) +} + +// UpdateInbox sets the "inbox" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateInbox() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateInbox() + }) +} + +// SetFeatured sets the "featured" field. +func (u *UserUpsertBulk) SetFeatured(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetFeatured(v) + }) +} + +// UpdateFeatured sets the "featured" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateFeatured() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateFeatured() + }) +} + +// SetFollowers sets the "followers" field. +func (u *UserUpsertBulk) SetFollowers(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetFollowers(v) + }) +} + +// UpdateFollowers sets the "followers" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateFollowers() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateFollowers() + }) +} + +// SetFollowing sets the "following" field. +func (u *UserUpsertBulk) SetFollowing(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetFollowing(v) + }) +} + +// UpdateFollowing sets the "following" field to the value that was provided on create. +func (u *UserUpsertBulk) UpdateFollowing() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateFollowing() + }) +} + +// SetOutbox sets the "outbox" field. +func (u *UserUpsertBulk) SetOutbox(v string) *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.SetOutbox(v) + }) +} + +// UpdateOutbox sets the "outbox" field to the value that was provided on create. 
+func (u *UserUpsertBulk) UpdateOutbox() *UserUpsertBulk { + return u.Update(func(s *UserUpsert) { + s.UpdateOutbox() + }) +} + +// Exec executes the query. +func (u *UserUpsertBulk) Exec(ctx context.Context) error { + if u.create.err != nil { + return u.create.err + } + for i, b := range u.create.builders { + if len(b.conflict) != 0 { + return fmt.Errorf("ent: OnConflict was set for builder %d. Set it on the UserCreateBulk instead", i) + } + } + if len(u.create.conflict) == 0 { + return errors.New("ent: missing options for UserCreateBulk.OnConflict") + } + return u.create.Exec(ctx) +} + +// ExecX is like Exec, but panics if an error occurs. +func (u *UserUpsertBulk) ExecX(ctx context.Context) { + if err := u.create.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/user_delete.go b/ent/user_delete.go new file mode 100644 index 0000000..5383f7b --- /dev/null +++ b/ent/user_delete.go @@ -0,0 +1,88 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/lysand-org/versia-go/ent/predicate" + "github.com/lysand-org/versia-go/ent/user" +) + +// UserDelete is the builder for deleting a User entity. +type UserDelete struct { + config + hooks []Hook + mutation *UserMutation +} + +// Where appends a list predicates to the UserDelete builder. +func (ud *UserDelete) Where(ps ...predicate.User) *UserDelete { + ud.mutation.Where(ps...) + return ud +} + +// Exec executes the deletion query and returns how many vertices were deleted. +func (ud *UserDelete) Exec(ctx context.Context) (int, error) { + return withHooks(ctx, ud.sqlExec, ud.mutation, ud.hooks) +} + +// ExecX is like Exec, but panics if an error occurs. 
+func (ud *UserDelete) ExecX(ctx context.Context) int { + n, err := ud.Exec(ctx) + if err != nil { + panic(err) + } + return n +} + +func (ud *UserDelete) sqlExec(ctx context.Context) (int, error) { + _spec := sqlgraph.NewDeleteSpec(user.Table, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) + if ps := ud.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + affected, err := sqlgraph.DeleteNodes(ctx, ud.driver, _spec) + if err != nil && sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + ud.mutation.done = true + return affected, err +} + +// UserDeleteOne is the builder for deleting a single User entity. +type UserDeleteOne struct { + ud *UserDelete +} + +// Where appends a list predicates to the UserDelete builder. +func (udo *UserDeleteOne) Where(ps ...predicate.User) *UserDeleteOne { + udo.ud.mutation.Where(ps...) + return udo +} + +// Exec executes the deletion query. +func (udo *UserDeleteOne) Exec(ctx context.Context) error { + n, err := udo.ud.Exec(ctx) + switch { + case err != nil: + return err + case n == 0: + return &NotFoundError{user.Label} + default: + return nil + } +} + +// ExecX is like Exec, but panics if an error occurs. +func (udo *UserDeleteOne) ExecX(ctx context.Context) { + if err := udo.Exec(ctx); err != nil { + panic(err) + } +} diff --git a/ent/user_query.go b/ent/user_query.go new file mode 100644 index 0000000..e06d775 --- /dev/null +++ b/ent/user_query.go @@ -0,0 +1,868 @@ +// Code generated by ent, DO NOT EDIT. 
+ +package ent + +import ( + "context" + "database/sql/driver" + "fmt" + "math" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/image" + "github.com/lysand-org/versia-go/ent/note" + "github.com/lysand-org/versia-go/ent/predicate" + "github.com/lysand-org/versia-go/ent/user" +) + +// UserQuery is the builder for querying User entities. +type UserQuery struct { + config + ctx *QueryContext + order []user.OrderOption + inters []Interceptor + predicates []predicate.User + withAvatarImage *ImageQuery + withHeaderImage *ImageQuery + withAuthoredNotes *NoteQuery + withMentionedNotes *NoteQuery + withFKs bool + // intermediate query (i.e. traversal path). + sql *sql.Selector + path func(context.Context) (*sql.Selector, error) +} + +// Where adds a new predicate for the UserQuery builder. +func (uq *UserQuery) Where(ps ...predicate.User) *UserQuery { + uq.predicates = append(uq.predicates, ps...) + return uq +} + +// Limit the number of records to be returned by this query. +func (uq *UserQuery) Limit(limit int) *UserQuery { + uq.ctx.Limit = &limit + return uq +} + +// Offset to start from. +func (uq *UserQuery) Offset(offset int) *UserQuery { + uq.ctx.Offset = &offset + return uq +} + +// Unique configures the query builder to filter duplicate records on query. +// By default, unique is set to true, and can be disabled using this method. +func (uq *UserQuery) Unique(unique bool) *UserQuery { + uq.ctx.Unique = &unique + return uq +} + +// Order specifies how the records should be ordered. +func (uq *UserQuery) Order(o ...user.OrderOption) *UserQuery { + uq.order = append(uq.order, o...) + return uq +} + +// QueryAvatarImage chains the current query on the "avatarImage" edge. 
+func (uq *UserQuery) QueryAvatarImage() *ImageQuery { + query := (&ImageClient{config: uq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := uq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := uq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(image.Table, image.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, user.AvatarImageTable, user.AvatarImageColumn), + ) + fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryHeaderImage chains the current query on the "headerImage" edge. +func (uq *UserQuery) QueryHeaderImage() *ImageQuery { + query := (&ImageClient{config: uq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := uq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := uq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(image.Table, image.FieldID), + sqlgraph.Edge(sqlgraph.M2O, false, user.HeaderImageTable, user.HeaderImageColumn), + ) + fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryAuthoredNotes chains the current query on the "authoredNotes" edge. 
+func (uq *UserQuery) QueryAuthoredNotes() *NoteQuery { + query := (&NoteClient{config: uq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := uq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := uq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(note.Table, note.FieldID), + sqlgraph.Edge(sqlgraph.O2M, true, user.AuthoredNotesTable, user.AuthoredNotesColumn), + ) + fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// QueryMentionedNotes chains the current query on the "mentionedNotes" edge. +func (uq *UserQuery) QueryMentionedNotes() *NoteQuery { + query := (&NoteClient{config: uq.config}).Query() + query.path = func(ctx context.Context) (fromU *sql.Selector, err error) { + if err := uq.prepareQuery(ctx); err != nil { + return nil, err + } + selector := uq.sqlQuery(ctx) + if err := selector.Err(); err != nil { + return nil, err + } + step := sqlgraph.NewStep( + sqlgraph.From(user.Table, user.FieldID, selector), + sqlgraph.To(note.Table, note.FieldID), + sqlgraph.Edge(sqlgraph.M2M, true, user.MentionedNotesTable, user.MentionedNotesPrimaryKey...), + ) + fromU = sqlgraph.SetNeighbors(uq.driver.Dialect(), step) + return fromU, nil + } + return query +} + +// First returns the first User entity from the query. +// Returns a *NotFoundError when no User was found. +func (uq *UserQuery) First(ctx context.Context) (*User, error) { + nodes, err := uq.Limit(1).All(setContextOp(ctx, uq.ctx, "First")) + if err != nil { + return nil, err + } + if len(nodes) == 0 { + return nil, &NotFoundError{user.Label} + } + return nodes[0], nil +} + +// FirstX is like First, but panics if an error occurs. 
+func (uq *UserQuery) FirstX(ctx context.Context) *User { + node, err := uq.First(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return node +} + +// FirstID returns the first User ID from the query. +// Returns a *NotFoundError when no User ID was found. +func (uq *UserQuery) FirstID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = uq.Limit(1).IDs(setContextOp(ctx, uq.ctx, "FirstID")); err != nil { + return + } + if len(ids) == 0 { + err = &NotFoundError{user.Label} + return + } + return ids[0], nil +} + +// FirstIDX is like FirstID, but panics if an error occurs. +func (uq *UserQuery) FirstIDX(ctx context.Context) uuid.UUID { + id, err := uq.FirstID(ctx) + if err != nil && !IsNotFound(err) { + panic(err) + } + return id +} + +// Only returns a single User entity found by the query, ensuring it only returns one. +// Returns a *NotSingularError when more than one User entity is found. +// Returns a *NotFoundError when no User entities are found. +func (uq *UserQuery) Only(ctx context.Context) (*User, error) { + nodes, err := uq.Limit(2).All(setContextOp(ctx, uq.ctx, "Only")) + if err != nil { + return nil, err + } + switch len(nodes) { + case 1: + return nodes[0], nil + case 0: + return nil, &NotFoundError{user.Label} + default: + return nil, &NotSingularError{user.Label} + } +} + +// OnlyX is like Only, but panics if an error occurs. +func (uq *UserQuery) OnlyX(ctx context.Context) *User { + node, err := uq.Only(ctx) + if err != nil { + panic(err) + } + return node +} + +// OnlyID is like Only, but returns the only User ID in the query. +// Returns a *NotSingularError when more than one User ID is found. +// Returns a *NotFoundError when no entities are found. 
+func (uq *UserQuery) OnlyID(ctx context.Context) (id uuid.UUID, err error) { + var ids []uuid.UUID + if ids, err = uq.Limit(2).IDs(setContextOp(ctx, uq.ctx, "OnlyID")); err != nil { + return + } + switch len(ids) { + case 1: + id = ids[0] + case 0: + err = &NotFoundError{user.Label} + default: + err = &NotSingularError{user.Label} + } + return +} + +// OnlyIDX is like OnlyID, but panics if an error occurs. +func (uq *UserQuery) OnlyIDX(ctx context.Context) uuid.UUID { + id, err := uq.OnlyID(ctx) + if err != nil { + panic(err) + } + return id +} + +// All executes the query and returns a list of Users. +func (uq *UserQuery) All(ctx context.Context) ([]*User, error) { + ctx = setContextOp(ctx, uq.ctx, "All") + if err := uq.prepareQuery(ctx); err != nil { + return nil, err + } + qr := querierAll[[]*User, *UserQuery]() + return withInterceptors[[]*User](ctx, uq, qr, uq.inters) +} + +// AllX is like All, but panics if an error occurs. +func (uq *UserQuery) AllX(ctx context.Context) []*User { + nodes, err := uq.All(ctx) + if err != nil { + panic(err) + } + return nodes +} + +// IDs executes the query and returns a list of User IDs. +func (uq *UserQuery) IDs(ctx context.Context) (ids []uuid.UUID, err error) { + if uq.ctx.Unique == nil && uq.path != nil { + uq.Unique(true) + } + ctx = setContextOp(ctx, uq.ctx, "IDs") + if err = uq.Select(user.FieldID).Scan(ctx, &ids); err != nil { + return nil, err + } + return ids, nil +} + +// IDsX is like IDs, but panics if an error occurs. +func (uq *UserQuery) IDsX(ctx context.Context) []uuid.UUID { + ids, err := uq.IDs(ctx) + if err != nil { + panic(err) + } + return ids +} + +// Count returns the count of the given query. +func (uq *UserQuery) Count(ctx context.Context) (int, error) { + ctx = setContextOp(ctx, uq.ctx, "Count") + if err := uq.prepareQuery(ctx); err != nil { + return 0, err + } + return withInterceptors[int](ctx, uq, querierCount[*UserQuery](), uq.inters) +} + +// CountX is like Count, but panics if an error occurs. 
+func (uq *UserQuery) CountX(ctx context.Context) int { + count, err := uq.Count(ctx) + if err != nil { + panic(err) + } + return count +} + +// Exist returns true if the query has elements in the graph. +func (uq *UserQuery) Exist(ctx context.Context) (bool, error) { + ctx = setContextOp(ctx, uq.ctx, "Exist") + switch _, err := uq.FirstID(ctx); { + case IsNotFound(err): + return false, nil + case err != nil: + return false, fmt.Errorf("ent: check existence: %w", err) + default: + return true, nil + } +} + +// ExistX is like Exist, but panics if an error occurs. +func (uq *UserQuery) ExistX(ctx context.Context) bool { + exist, err := uq.Exist(ctx) + if err != nil { + panic(err) + } + return exist +} + +// Clone returns a duplicate of the UserQuery builder, including all associated steps. It can be +// used to prepare common query builders and use them differently after the clone is made. +func (uq *UserQuery) Clone() *UserQuery { + if uq == nil { + return nil + } + return &UserQuery{ + config: uq.config, + ctx: uq.ctx.Clone(), + order: append([]user.OrderOption{}, uq.order...), + inters: append([]Interceptor{}, uq.inters...), + predicates: append([]predicate.User{}, uq.predicates...), + withAvatarImage: uq.withAvatarImage.Clone(), + withHeaderImage: uq.withHeaderImage.Clone(), + withAuthoredNotes: uq.withAuthoredNotes.Clone(), + withMentionedNotes: uq.withMentionedNotes.Clone(), + // clone intermediate query. + sql: uq.sql.Clone(), + path: uq.path, + } +} + +// WithAvatarImage tells the query-builder to eager-load the nodes that are connected to +// the "avatarImage" edge. The optional arguments are used to configure the query builder of the edge. 
+func (uq *UserQuery) WithAvatarImage(opts ...func(*ImageQuery)) *UserQuery { + query := (&ImageClient{config: uq.config}).Query() + for _, opt := range opts { + opt(query) + } + uq.withAvatarImage = query + return uq +} + +// WithHeaderImage tells the query-builder to eager-load the nodes that are connected to +// the "headerImage" edge. The optional arguments are used to configure the query builder of the edge. +func (uq *UserQuery) WithHeaderImage(opts ...func(*ImageQuery)) *UserQuery { + query := (&ImageClient{config: uq.config}).Query() + for _, opt := range opts { + opt(query) + } + uq.withHeaderImage = query + return uq +} + +// WithAuthoredNotes tells the query-builder to eager-load the nodes that are connected to +// the "authoredNotes" edge. The optional arguments are used to configure the query builder of the edge. +func (uq *UserQuery) WithAuthoredNotes(opts ...func(*NoteQuery)) *UserQuery { + query := (&NoteClient{config: uq.config}).Query() + for _, opt := range opts { + opt(query) + } + uq.withAuthoredNotes = query + return uq +} + +// WithMentionedNotes tells the query-builder to eager-load the nodes that are connected to +// the "mentionedNotes" edge. The optional arguments are used to configure the query builder of the edge. +func (uq *UserQuery) WithMentionedNotes(opts ...func(*NoteQuery)) *UserQuery { + query := (&NoteClient{config: uq.config}).Query() + for _, opt := range opts { + opt(query) + } + uq.withMentionedNotes = query + return uq +} + +// GroupBy is used to group vertices by one or more fields/columns. +// It is often used with aggregate functions, like: count, max, mean, min, sum. +// +// Example: +// +// var v []struct { +// IsRemote bool `json:"isRemote,omitempty"` +// Count int `json:"count,omitempty"` +// } +// +// client.User.Query(). +// GroupBy(user.FieldIsRemote). +// Aggregate(ent.Count()). 
+// Scan(ctx, &v) +func (uq *UserQuery) GroupBy(field string, fields ...string) *UserGroupBy { + uq.ctx.Fields = append([]string{field}, fields...) + grbuild := &UserGroupBy{build: uq} + grbuild.flds = &uq.ctx.Fields + grbuild.label = user.Label + grbuild.scan = grbuild.Scan + return grbuild +} + +// Select allows the selection one or more fields/columns for the given query, +// instead of selecting all fields in the entity. +// +// Example: +// +// var v []struct { +// IsRemote bool `json:"isRemote,omitempty"` +// } +// +// client.User.Query(). +// Select(user.FieldIsRemote). +// Scan(ctx, &v) +func (uq *UserQuery) Select(fields ...string) *UserSelect { + uq.ctx.Fields = append(uq.ctx.Fields, fields...) + sbuild := &UserSelect{UserQuery: uq} + sbuild.label = user.Label + sbuild.flds, sbuild.scan = &uq.ctx.Fields, sbuild.Scan + return sbuild +} + +// Aggregate returns a UserSelect configured with the given aggregations. +func (uq *UserQuery) Aggregate(fns ...AggregateFunc) *UserSelect { + return uq.Select().Aggregate(fns...) 
+} + +func (uq *UserQuery) prepareQuery(ctx context.Context) error { + for _, inter := range uq.inters { + if inter == nil { + return fmt.Errorf("ent: uninitialized interceptor (forgotten import ent/runtime?)") + } + if trv, ok := inter.(Traverser); ok { + if err := trv.Traverse(ctx, uq); err != nil { + return err + } + } + } + for _, f := range uq.ctx.Fields { + if !user.ValidColumn(f) { + return &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + } + if uq.path != nil { + prev, err := uq.path(ctx) + if err != nil { + return err + } + uq.sql = prev + } + return nil +} + +func (uq *UserQuery) sqlAll(ctx context.Context, hooks ...queryHook) ([]*User, error) { + var ( + nodes = []*User{} + withFKs = uq.withFKs + _spec = uq.querySpec() + loadedTypes = [4]bool{ + uq.withAvatarImage != nil, + uq.withHeaderImage != nil, + uq.withAuthoredNotes != nil, + uq.withMentionedNotes != nil, + } + ) + if uq.withAvatarImage != nil || uq.withHeaderImage != nil { + withFKs = true + } + if withFKs { + _spec.Node.Columns = append(_spec.Node.Columns, user.ForeignKeys...) 
+ } + _spec.ScanValues = func(columns []string) ([]any, error) { + return (*User).scanValues(nil, columns) + } + _spec.Assign = func(columns []string, values []any) error { + node := &User{config: uq.config} + nodes = append(nodes, node) + node.Edges.loadedTypes = loadedTypes + return node.assignValues(columns, values) + } + for i := range hooks { + hooks[i](ctx, _spec) + } + if err := sqlgraph.QueryNodes(ctx, uq.driver, _spec); err != nil { + return nil, err + } + if len(nodes) == 0 { + return nodes, nil + } + if query := uq.withAvatarImage; query != nil { + if err := uq.loadAvatarImage(ctx, query, nodes, nil, + func(n *User, e *Image) { n.Edges.AvatarImage = e }); err != nil { + return nil, err + } + } + if query := uq.withHeaderImage; query != nil { + if err := uq.loadHeaderImage(ctx, query, nodes, nil, + func(n *User, e *Image) { n.Edges.HeaderImage = e }); err != nil { + return nil, err + } + } + if query := uq.withAuthoredNotes; query != nil { + if err := uq.loadAuthoredNotes(ctx, query, nodes, + func(n *User) { n.Edges.AuthoredNotes = []*Note{} }, + func(n *User, e *Note) { n.Edges.AuthoredNotes = append(n.Edges.AuthoredNotes, e) }); err != nil { + return nil, err + } + } + if query := uq.withMentionedNotes; query != nil { + if err := uq.loadMentionedNotes(ctx, query, nodes, + func(n *User) { n.Edges.MentionedNotes = []*Note{} }, + func(n *User, e *Note) { n.Edges.MentionedNotes = append(n.Edges.MentionedNotes, e) }); err != nil { + return nil, err + } + } + return nodes, nil +} + +func (uq *UserQuery) loadAvatarImage(ctx context.Context, query *ImageQuery, nodes []*User, init func(*User), assign func(*User, *Image)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*User) + for i := range nodes { + if nodes[i].user_avatar_image == nil { + continue + } + fk := *nodes[i].user_avatar_image + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + 
query.Where(image.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_avatar_image" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (uq *UserQuery) loadHeaderImage(ctx context.Context, query *ImageQuery, nodes []*User, init func(*User), assign func(*User, *Image)) error { + ids := make([]int, 0, len(nodes)) + nodeids := make(map[int][]*User) + for i := range nodes { + if nodes[i].user_header_image == nil { + continue + } + fk := *nodes[i].user_header_image + if _, ok := nodeids[fk]; !ok { + ids = append(ids, fk) + } + nodeids[fk] = append(nodeids[fk], nodes[i]) + } + if len(ids) == 0 { + return nil + } + query.Where(image.IDIn(ids...)) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nodeids[n.ID] + if !ok { + return fmt.Errorf(`unexpected foreign-key "user_header_image" returned %v`, n.ID) + } + for i := range nodes { + assign(nodes[i], n) + } + } + return nil +} +func (uq *UserQuery) loadAuthoredNotes(ctx context.Context, query *NoteQuery, nodes []*User, init func(*User), assign func(*User, *Note)) error { + fks := make([]driver.Value, 0, len(nodes)) + nodeids := make(map[uuid.UUID]*User) + for i := range nodes { + fks = append(fks, nodes[i].ID) + nodeids[nodes[i].ID] = nodes[i] + if init != nil { + init(nodes[i]) + } + } + query.withFKs = true + query.Where(predicate.Note(func(s *sql.Selector) { + s.Where(sql.InValues(s.C(user.AuthoredNotesColumn), fks...)) + })) + neighbors, err := query.All(ctx) + if err != nil { + return err + } + for _, n := range neighbors { + fk := n.note_author + if fk == nil { + return fmt.Errorf(`foreign-key "note_author" is nil for node %v`, n.ID) + } + node, ok := nodeids[*fk] + if !ok { + return fmt.Errorf(`unexpected referenced foreign-key "note_author" returned 
%v for node %v`, *fk, n.ID) + } + assign(node, n) + } + return nil +} +func (uq *UserQuery) loadMentionedNotes(ctx context.Context, query *NoteQuery, nodes []*User, init func(*User), assign func(*User, *Note)) error { + edgeIDs := make([]driver.Value, len(nodes)) + byID := make(map[uuid.UUID]*User) + nids := make(map[uuid.UUID]map[*User]struct{}) + for i, node := range nodes { + edgeIDs[i] = node.ID + byID[node.ID] = node + if init != nil { + init(node) + } + } + query.Where(func(s *sql.Selector) { + joinT := sql.Table(user.MentionedNotesTable) + s.Join(joinT).On(s.C(note.FieldID), joinT.C(user.MentionedNotesPrimaryKey[0])) + s.Where(sql.InValues(joinT.C(user.MentionedNotesPrimaryKey[1]), edgeIDs...)) + columns := s.SelectedColumns() + s.Select(joinT.C(user.MentionedNotesPrimaryKey[1])) + s.AppendSelect(columns...) + s.SetDistinct(false) + }) + if err := query.prepareQuery(ctx); err != nil { + return err + } + qr := QuerierFunc(func(ctx context.Context, q Query) (Value, error) { + return query.sqlAll(ctx, func(_ context.Context, spec *sqlgraph.QuerySpec) { + assign := spec.Assign + values := spec.ScanValues + spec.ScanValues = func(columns []string) ([]any, error) { + values, err := values(columns[1:]) + if err != nil { + return nil, err + } + return append([]any{new(uuid.UUID)}, values...), nil + } + spec.Assign = func(columns []string, values []any) error { + outValue := *values[0].(*uuid.UUID) + inValue := *values[1].(*uuid.UUID) + if nids[inValue] == nil { + nids[inValue] = map[*User]struct{}{byID[outValue]: {}} + return assign(columns[1:], values[1:]) + } + nids[inValue][byID[outValue]] = struct{}{} + return nil + } + }) + }) + neighbors, err := withInterceptors[[]*Note](ctx, query, qr, query.inters) + if err != nil { + return err + } + for _, n := range neighbors { + nodes, ok := nids[n.ID] + if !ok { + return fmt.Errorf(`unexpected "mentionedNotes" node returned %v`, n.ID) + } + for kn := range nodes { + assign(kn, n) + } + } + return nil +} + +func (uq 
*UserQuery) sqlCount(ctx context.Context) (int, error) { + _spec := uq.querySpec() + _spec.Node.Columns = uq.ctx.Fields + if len(uq.ctx.Fields) > 0 { + _spec.Unique = uq.ctx.Unique != nil && *uq.ctx.Unique + } + return sqlgraph.CountNodes(ctx, uq.driver, _spec) +} + +func (uq *UserQuery) querySpec() *sqlgraph.QuerySpec { + _spec := sqlgraph.NewQuerySpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) + _spec.From = uq.sql + if unique := uq.ctx.Unique; unique != nil { + _spec.Unique = *unique + } else if uq.path != nil { + _spec.Unique = true + } + if fields := uq.ctx.Fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) + for i := range fields { + if fields[i] != user.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, fields[i]) + } + } + } + if ps := uq.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if limit := uq.ctx.Limit; limit != nil { + _spec.Limit = *limit + } + if offset := uq.ctx.Offset; offset != nil { + _spec.Offset = *offset + } + if ps := uq.order; len(ps) > 0 { + _spec.Order = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + return _spec +} + +func (uq *UserQuery) sqlQuery(ctx context.Context) *sql.Selector { + builder := sql.Dialect(uq.driver.Dialect()) + t1 := builder.Table(user.Table) + columns := uq.ctx.Fields + if len(columns) == 0 { + columns = user.Columns + } + selector := builder.Select(t1.Columns(columns...)...).From(t1) + if uq.sql != nil { + selector = uq.sql + selector.Select(selector.Columns(columns...)...) + } + if uq.ctx.Unique != nil && *uq.ctx.Unique { + selector.Distinct() + } + for _, p := range uq.predicates { + p(selector) + } + for _, p := range uq.order { + p(selector) + } + if offset := uq.ctx.Offset; offset != nil { + // limit is mandatory for offset clause. 
We start + // with default value, and override it below if needed. + selector.Offset(*offset).Limit(math.MaxInt32) + } + if limit := uq.ctx.Limit; limit != nil { + selector.Limit(*limit) + } + return selector +} + +// UserGroupBy is the group-by builder for User entities. +type UserGroupBy struct { + selector + build *UserQuery +} + +// Aggregate adds the given aggregation functions to the group-by query. +func (ugb *UserGroupBy) Aggregate(fns ...AggregateFunc) *UserGroupBy { + ugb.fns = append(ugb.fns, fns...) + return ugb +} + +// Scan applies the selector query and scans the result into the given value. +func (ugb *UserGroupBy) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, ugb.build.ctx, "GroupBy") + if err := ugb.build.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserQuery, *UserGroupBy](ctx, ugb.build, ugb, ugb.build.inters, v) +} + +func (ugb *UserGroupBy) sqlScan(ctx context.Context, root *UserQuery, v any) error { + selector := root.sqlQuery(ctx).Select() + aggregation := make([]string, 0, len(ugb.fns)) + for _, fn := range ugb.fns { + aggregation = append(aggregation, fn(selector)) + } + if len(selector.SelectedColumns()) == 0 { + columns := make([]string, 0, len(*ugb.flds)+len(ugb.fns)) + for _, f := range *ugb.flds { + columns = append(columns, selector.C(f)) + } + columns = append(columns, aggregation...) + selector.Select(columns...) + } + selector.GroupBy(selector.Columns(*ugb.flds...)...) + if err := selector.Err(); err != nil { + return err + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := ugb.build.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} + +// UserSelect is the builder for selecting fields of User entities. +type UserSelect struct { + *UserQuery + selector +} + +// Aggregate adds the given aggregation functions to the selector query. 
+func (us *UserSelect) Aggregate(fns ...AggregateFunc) *UserSelect { + us.fns = append(us.fns, fns...) + return us +} + +// Scan applies the selector query and scans the result into the given value. +func (us *UserSelect) Scan(ctx context.Context, v any) error { + ctx = setContextOp(ctx, us.ctx, "Select") + if err := us.prepareQuery(ctx); err != nil { + return err + } + return scanWithInterceptors[*UserQuery, *UserSelect](ctx, us.UserQuery, us, us.inters, v) +} + +func (us *UserSelect) sqlScan(ctx context.Context, root *UserQuery, v any) error { + selector := root.sqlQuery(ctx) + aggregation := make([]string, 0, len(us.fns)) + for _, fn := range us.fns { + aggregation = append(aggregation, fn(selector)) + } + switch n := len(*us.selector.flds); { + case n == 0 && len(aggregation) > 0: + selector.Select(aggregation...) + case n != 0 && len(aggregation) > 0: + selector.AppendSelect(aggregation...) + } + rows := &sql.Rows{} + query, args := selector.Query() + if err := us.driver.Query(ctx, query, args, rows); err != nil { + return err + } + defer rows.Close() + return sql.ScanSlice(rows, v) +} diff --git a/ent/user_update.go b/ent/user_update.go new file mode 100644 index 0000000..7a3014f --- /dev/null +++ b/ent/user_update.go @@ -0,0 +1,1456 @@ +// Code generated by ent, DO NOT EDIT. + +package ent + +import ( + "context" + "crypto/ed25519" + "errors" + "fmt" + "time" + + "entgo.io/ent/dialect/sql" + "entgo.io/ent/dialect/sql/sqlgraph" + "entgo.io/ent/dialect/sql/sqljson" + "entgo.io/ent/schema/field" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent/image" + "github.com/lysand-org/versia-go/ent/note" + "github.com/lysand-org/versia-go/ent/predicate" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +// UserUpdate is the builder for updating User entities. +type UserUpdate struct { + config + hooks []Hook + mutation *UserMutation +} + +// Where appends a list predicates to the UserUpdate builder. 
+func (uu *UserUpdate) Where(ps ...predicate.User) *UserUpdate { + uu.mutation.Where(ps...) + return uu +} + +// SetIsRemote sets the "isRemote" field. +func (uu *UserUpdate) SetIsRemote(b bool) *UserUpdate { + uu.mutation.SetIsRemote(b) + return uu +} + +// SetNillableIsRemote sets the "isRemote" field if the given value is not nil. +func (uu *UserUpdate) SetNillableIsRemote(b *bool) *UserUpdate { + if b != nil { + uu.SetIsRemote(*b) + } + return uu +} + +// SetURI sets the "uri" field. +func (uu *UserUpdate) SetURI(s string) *UserUpdate { + uu.mutation.SetURI(s) + return uu +} + +// SetNillableURI sets the "uri" field if the given value is not nil. +func (uu *UserUpdate) SetNillableURI(s *string) *UserUpdate { + if s != nil { + uu.SetURI(*s) + } + return uu +} + +// SetExtensions sets the "extensions" field. +func (uu *UserUpdate) SetExtensions(l lysand.Extensions) *UserUpdate { + uu.mutation.SetExtensions(l) + return uu +} + +// SetUpdatedAt sets the "updated_at" field. +func (uu *UserUpdate) SetUpdatedAt(t time.Time) *UserUpdate { + uu.mutation.SetUpdatedAt(t) + return uu +} + +// SetUsername sets the "username" field. +func (uu *UserUpdate) SetUsername(s string) *UserUpdate { + uu.mutation.SetUsername(s) + return uu +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (uu *UserUpdate) SetNillableUsername(s *string) *UserUpdate { + if s != nil { + uu.SetUsername(*s) + } + return uu +} + +// SetPasswordHash sets the "passwordHash" field. +func (uu *UserUpdate) SetPasswordHash(b []byte) *UserUpdate { + uu.mutation.SetPasswordHash(b) + return uu +} + +// ClearPasswordHash clears the value of the "passwordHash" field. +func (uu *UserUpdate) ClearPasswordHash() *UserUpdate { + uu.mutation.ClearPasswordHash() + return uu +} + +// SetDisplayName sets the "displayName" field. 
+func (uu *UserUpdate) SetDisplayName(s string) *UserUpdate { + uu.mutation.SetDisplayName(s) + return uu +} + +// SetNillableDisplayName sets the "displayName" field if the given value is not nil. +func (uu *UserUpdate) SetNillableDisplayName(s *string) *UserUpdate { + if s != nil { + uu.SetDisplayName(*s) + } + return uu +} + +// ClearDisplayName clears the value of the "displayName" field. +func (uu *UserUpdate) ClearDisplayName() *UserUpdate { + uu.mutation.ClearDisplayName() + return uu +} + +// SetBiography sets the "biography" field. +func (uu *UserUpdate) SetBiography(s string) *UserUpdate { + uu.mutation.SetBiography(s) + return uu +} + +// SetNillableBiography sets the "biography" field if the given value is not nil. +func (uu *UserUpdate) SetNillableBiography(s *string) *UserUpdate { + if s != nil { + uu.SetBiography(*s) + } + return uu +} + +// ClearBiography clears the value of the "biography" field. +func (uu *UserUpdate) ClearBiography() *UserUpdate { + uu.mutation.ClearBiography() + return uu +} + +// SetPublicKey sets the "publicKey" field. +func (uu *UserUpdate) SetPublicKey(ek ed25519.PublicKey) *UserUpdate { + uu.mutation.SetPublicKey(ek) + return uu +} + +// SetPrivateKey sets the "privateKey" field. +func (uu *UserUpdate) SetPrivateKey(ek ed25519.PrivateKey) *UserUpdate { + uu.mutation.SetPrivateKey(ek) + return uu +} + +// ClearPrivateKey clears the value of the "privateKey" field. +func (uu *UserUpdate) ClearPrivateKey() *UserUpdate { + uu.mutation.ClearPrivateKey() + return uu +} + +// SetIndexable sets the "indexable" field. +func (uu *UserUpdate) SetIndexable(b bool) *UserUpdate { + uu.mutation.SetIndexable(b) + return uu +} + +// SetNillableIndexable sets the "indexable" field if the given value is not nil. +func (uu *UserUpdate) SetNillableIndexable(b *bool) *UserUpdate { + if b != nil { + uu.SetIndexable(*b) + } + return uu +} + +// SetPrivacyLevel sets the "privacyLevel" field. 
+func (uu *UserUpdate) SetPrivacyLevel(ul user.PrivacyLevel) *UserUpdate { + uu.mutation.SetPrivacyLevel(ul) + return uu +} + +// SetNillablePrivacyLevel sets the "privacyLevel" field if the given value is not nil. +func (uu *UserUpdate) SetNillablePrivacyLevel(ul *user.PrivacyLevel) *UserUpdate { + if ul != nil { + uu.SetPrivacyLevel(*ul) + } + return uu +} + +// SetFields sets the "fields" field. +func (uu *UserUpdate) SetFields(l []lysand.Field) *UserUpdate { + uu.mutation.SetFields(l) + return uu +} + +// AppendFields appends l to the "fields" field. +func (uu *UserUpdate) AppendFields(l []lysand.Field) *UserUpdate { + uu.mutation.AppendFields(l) + return uu +} + +// SetInbox sets the "inbox" field. +func (uu *UserUpdate) SetInbox(s string) *UserUpdate { + uu.mutation.SetInbox(s) + return uu +} + +// SetNillableInbox sets the "inbox" field if the given value is not nil. +func (uu *UserUpdate) SetNillableInbox(s *string) *UserUpdate { + if s != nil { + uu.SetInbox(*s) + } + return uu +} + +// SetFeatured sets the "featured" field. +func (uu *UserUpdate) SetFeatured(s string) *UserUpdate { + uu.mutation.SetFeatured(s) + return uu +} + +// SetNillableFeatured sets the "featured" field if the given value is not nil. +func (uu *UserUpdate) SetNillableFeatured(s *string) *UserUpdate { + if s != nil { + uu.SetFeatured(*s) + } + return uu +} + +// SetFollowers sets the "followers" field. +func (uu *UserUpdate) SetFollowers(s string) *UserUpdate { + uu.mutation.SetFollowers(s) + return uu +} + +// SetNillableFollowers sets the "followers" field if the given value is not nil. +func (uu *UserUpdate) SetNillableFollowers(s *string) *UserUpdate { + if s != nil { + uu.SetFollowers(*s) + } + return uu +} + +// SetFollowing sets the "following" field. +func (uu *UserUpdate) SetFollowing(s string) *UserUpdate { + uu.mutation.SetFollowing(s) + return uu +} + +// SetNillableFollowing sets the "following" field if the given value is not nil. 
+func (uu *UserUpdate) SetNillableFollowing(s *string) *UserUpdate { + if s != nil { + uu.SetFollowing(*s) + } + return uu +} + +// SetOutbox sets the "outbox" field. +func (uu *UserUpdate) SetOutbox(s string) *UserUpdate { + uu.mutation.SetOutbox(s) + return uu +} + +// SetNillableOutbox sets the "outbox" field if the given value is not nil. +func (uu *UserUpdate) SetNillableOutbox(s *string) *UserUpdate { + if s != nil { + uu.SetOutbox(*s) + } + return uu +} + +// SetAvatarImageID sets the "avatarImage" edge to the Image entity by ID. +func (uu *UserUpdate) SetAvatarImageID(id int) *UserUpdate { + uu.mutation.SetAvatarImageID(id) + return uu +} + +// SetNillableAvatarImageID sets the "avatarImage" edge to the Image entity by ID if the given value is not nil. +func (uu *UserUpdate) SetNillableAvatarImageID(id *int) *UserUpdate { + if id != nil { + uu = uu.SetAvatarImageID(*id) + } + return uu +} + +// SetAvatarImage sets the "avatarImage" edge to the Image entity. +func (uu *UserUpdate) SetAvatarImage(i *Image) *UserUpdate { + return uu.SetAvatarImageID(i.ID) +} + +// SetHeaderImageID sets the "headerImage" edge to the Image entity by ID. +func (uu *UserUpdate) SetHeaderImageID(id int) *UserUpdate { + uu.mutation.SetHeaderImageID(id) + return uu +} + +// SetNillableHeaderImageID sets the "headerImage" edge to the Image entity by ID if the given value is not nil. +func (uu *UserUpdate) SetNillableHeaderImageID(id *int) *UserUpdate { + if id != nil { + uu = uu.SetHeaderImageID(*id) + } + return uu +} + +// SetHeaderImage sets the "headerImage" edge to the Image entity. +func (uu *UserUpdate) SetHeaderImage(i *Image) *UserUpdate { + return uu.SetHeaderImageID(i.ID) +} + +// AddAuthoredNoteIDs adds the "authoredNotes" edge to the Note entity by IDs. +func (uu *UserUpdate) AddAuthoredNoteIDs(ids ...uuid.UUID) *UserUpdate { + uu.mutation.AddAuthoredNoteIDs(ids...) + return uu +} + +// AddAuthoredNotes adds the "authoredNotes" edges to the Note entity. 
+func (uu *UserUpdate) AddAuthoredNotes(n ...*Note) *UserUpdate { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uu.AddAuthoredNoteIDs(ids...) +} + +// AddMentionedNoteIDs adds the "mentionedNotes" edge to the Note entity by IDs. +func (uu *UserUpdate) AddMentionedNoteIDs(ids ...uuid.UUID) *UserUpdate { + uu.mutation.AddMentionedNoteIDs(ids...) + return uu +} + +// AddMentionedNotes adds the "mentionedNotes" edges to the Note entity. +func (uu *UserUpdate) AddMentionedNotes(n ...*Note) *UserUpdate { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uu.AddMentionedNoteIDs(ids...) +} + +// Mutation returns the UserMutation object of the builder. +func (uu *UserUpdate) Mutation() *UserMutation { + return uu.mutation +} + +// ClearAvatarImage clears the "avatarImage" edge to the Image entity. +func (uu *UserUpdate) ClearAvatarImage() *UserUpdate { + uu.mutation.ClearAvatarImage() + return uu +} + +// ClearHeaderImage clears the "headerImage" edge to the Image entity. +func (uu *UserUpdate) ClearHeaderImage() *UserUpdate { + uu.mutation.ClearHeaderImage() + return uu +} + +// ClearAuthoredNotes clears all "authoredNotes" edges to the Note entity. +func (uu *UserUpdate) ClearAuthoredNotes() *UserUpdate { + uu.mutation.ClearAuthoredNotes() + return uu +} + +// RemoveAuthoredNoteIDs removes the "authoredNotes" edge to Note entities by IDs. +func (uu *UserUpdate) RemoveAuthoredNoteIDs(ids ...uuid.UUID) *UserUpdate { + uu.mutation.RemoveAuthoredNoteIDs(ids...) + return uu +} + +// RemoveAuthoredNotes removes "authoredNotes" edges to Note entities. +func (uu *UserUpdate) RemoveAuthoredNotes(n ...*Note) *UserUpdate { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uu.RemoveAuthoredNoteIDs(ids...) +} + +// ClearMentionedNotes clears all "mentionedNotes" edges to the Note entity. 
+func (uu *UserUpdate) ClearMentionedNotes() *UserUpdate { + uu.mutation.ClearMentionedNotes() + return uu +} + +// RemoveMentionedNoteIDs removes the "mentionedNotes" edge to Note entities by IDs. +func (uu *UserUpdate) RemoveMentionedNoteIDs(ids ...uuid.UUID) *UserUpdate { + uu.mutation.RemoveMentionedNoteIDs(ids...) + return uu +} + +// RemoveMentionedNotes removes "mentionedNotes" edges to Note entities. +func (uu *UserUpdate) RemoveMentionedNotes(n ...*Note) *UserUpdate { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uu.RemoveMentionedNoteIDs(ids...) +} + +// Save executes the query and returns the number of nodes affected by the update operation. +func (uu *UserUpdate) Save(ctx context.Context) (int, error) { + uu.defaults() + return withHooks(ctx, uu.sqlSave, uu.mutation, uu.hooks) +} + +// SaveX is like Save, but panics if an error occurs. +func (uu *UserUpdate) SaveX(ctx context.Context) int { + affected, err := uu.Save(ctx) + if err != nil { + panic(err) + } + return affected +} + +// Exec executes the query. +func (uu *UserUpdate) Exec(ctx context.Context) error { + _, err := uu.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (uu *UserUpdate) ExecX(ctx context.Context) { + if err := uu.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (uu *UserUpdate) defaults() { + if _, ok := uu.mutation.UpdatedAt(); !ok { + v := user.UpdateDefaultUpdatedAt() + uu.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. 
+func (uu *UserUpdate) check() error { + if v, ok := uu.mutation.URI(); ok { + if err := user.URIValidator(v); err != nil { + return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "User.uri": %w`, err)} + } + } + if v, ok := uu.mutation.Username(); ok { + if err := user.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "User.username": %w`, err)} + } + } + if v, ok := uu.mutation.DisplayName(); ok { + if err := user.DisplayNameValidator(v); err != nil { + return &ValidationError{Name: "displayName", err: fmt.Errorf(`ent: validator failed for field "User.displayName": %w`, err)} + } + } + if v, ok := uu.mutation.PrivacyLevel(); ok { + if err := user.PrivacyLevelValidator(v); err != nil { + return &ValidationError{Name: "privacyLevel", err: fmt.Errorf(`ent: validator failed for field "User.privacyLevel": %w`, err)} + } + } + if v, ok := uu.mutation.Inbox(); ok { + if err := user.InboxValidator(v); err != nil { + return &ValidationError{Name: "inbox", err: fmt.Errorf(`ent: validator failed for field "User.inbox": %w`, err)} + } + } + if v, ok := uu.mutation.Featured(); ok { + if err := user.FeaturedValidator(v); err != nil { + return &ValidationError{Name: "featured", err: fmt.Errorf(`ent: validator failed for field "User.featured": %w`, err)} + } + } + if v, ok := uu.mutation.Followers(); ok { + if err := user.FollowersValidator(v); err != nil { + return &ValidationError{Name: "followers", err: fmt.Errorf(`ent: validator failed for field "User.followers": %w`, err)} + } + } + if v, ok := uu.mutation.Following(); ok { + if err := user.FollowingValidator(v); err != nil { + return &ValidationError{Name: "following", err: fmt.Errorf(`ent: validator failed for field "User.following": %w`, err)} + } + } + if v, ok := uu.mutation.Outbox(); ok { + if err := user.OutboxValidator(v); err != nil { + return &ValidationError{Name: "outbox", err: fmt.Errorf(`ent: validator 
failed for field "User.outbox": %w`, err)} + } + } + return nil +} + +func (uu *UserUpdate) sqlSave(ctx context.Context) (n int, err error) { + if err := uu.check(); err != nil { + return n, err + } + _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) + if ps := uu.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := uu.mutation.IsRemote(); ok { + _spec.SetField(user.FieldIsRemote, field.TypeBool, value) + } + if value, ok := uu.mutation.URI(); ok { + _spec.SetField(user.FieldURI, field.TypeString, value) + } + if value, ok := uu.mutation.Extensions(); ok { + _spec.SetField(user.FieldExtensions, field.TypeJSON, value) + } + if value, ok := uu.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := uu.mutation.Username(); ok { + _spec.SetField(user.FieldUsername, field.TypeString, value) + } + if value, ok := uu.mutation.PasswordHash(); ok { + _spec.SetField(user.FieldPasswordHash, field.TypeBytes, value) + } + if uu.mutation.PasswordHashCleared() { + _spec.ClearField(user.FieldPasswordHash, field.TypeBytes) + } + if value, ok := uu.mutation.DisplayName(); ok { + _spec.SetField(user.FieldDisplayName, field.TypeString, value) + } + if uu.mutation.DisplayNameCleared() { + _spec.ClearField(user.FieldDisplayName, field.TypeString) + } + if value, ok := uu.mutation.Biography(); ok { + _spec.SetField(user.FieldBiography, field.TypeString, value) + } + if uu.mutation.BiographyCleared() { + _spec.ClearField(user.FieldBiography, field.TypeString) + } + if value, ok := uu.mutation.PublicKey(); ok { + _spec.SetField(user.FieldPublicKey, field.TypeBytes, value) + } + if value, ok := uu.mutation.PrivateKey(); ok { + _spec.SetField(user.FieldPrivateKey, field.TypeBytes, value) + } + if uu.mutation.PrivateKeyCleared() { + _spec.ClearField(user.FieldPrivateKey, 
field.TypeBytes) + } + if value, ok := uu.mutation.Indexable(); ok { + _spec.SetField(user.FieldIndexable, field.TypeBool, value) + } + if value, ok := uu.mutation.PrivacyLevel(); ok { + _spec.SetField(user.FieldPrivacyLevel, field.TypeEnum, value) + } + if value, ok := uu.mutation.GetFields(); ok { + _spec.SetField(user.FieldFields, field.TypeJSON, value) + } + if value, ok := uu.mutation.AppendedFields(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, user.FieldFields, value) + }) + } + if value, ok := uu.mutation.Inbox(); ok { + _spec.SetField(user.FieldInbox, field.TypeString, value) + } + if value, ok := uu.mutation.Featured(); ok { + _spec.SetField(user.FieldFeatured, field.TypeString, value) + } + if value, ok := uu.mutation.Followers(); ok { + _spec.SetField(user.FieldFollowers, field.TypeString, value) + } + if value, ok := uu.mutation.Following(); ok { + _spec.SetField(user.FieldFollowing, field.TypeString, value) + } + if value, ok := uu.mutation.Outbox(); ok { + _spec.SetField(user.FieldOutbox, field.TypeString, value) + } + if uu.mutation.AvatarImageCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: user.AvatarImageTable, + Columns: []string{user.AvatarImageColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.AvatarImageIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: user.AvatarImageTable, + Columns: []string{user.AvatarImageColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if uu.mutation.HeaderImageCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + 
Inverse: false, + Table: user.HeaderImageTable, + Columns: []string{user.HeaderImageColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.HeaderImageIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: user.HeaderImageTable, + Columns: []string{user.HeaderImageColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if uu.mutation.AuthoredNotesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: user.AuthoredNotesTable, + Columns: []string{user.AuthoredNotesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.RemovedAuthoredNotesIDs(); len(nodes) > 0 && !uu.mutation.AuthoredNotesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: user.AuthoredNotesTable, + Columns: []string{user.AuthoredNotesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.AuthoredNotesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: user.AuthoredNotesTable, + Columns: []string{user.AuthoredNotesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = 
append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if uu.mutation.MentionedNotesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: user.MentionedNotesTable, + Columns: user.MentionedNotesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.RemovedMentionedNotesIDs(); len(nodes) > 0 && !uu.mutation.MentionedNotesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: user.MentionedNotesTable, + Columns: user.MentionedNotesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uu.mutation.MentionedNotesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: user.MentionedNotesTable, + Columns: user.MentionedNotesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if n, err = sqlgraph.UpdateNodes(ctx, uu.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{user.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return 0, err + } + uu.mutation.done = true + return n, nil +} + +// UserUpdateOne is the builder for updating a single User entity. +type UserUpdateOne struct { + config + fields []string + hooks []Hook + mutation *UserMutation +} + +// SetIsRemote sets the "isRemote" field. 
+func (uuo *UserUpdateOne) SetIsRemote(b bool) *UserUpdateOne { + uuo.mutation.SetIsRemote(b) + return uuo +} + +// SetNillableIsRemote sets the "isRemote" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableIsRemote(b *bool) *UserUpdateOne { + if b != nil { + uuo.SetIsRemote(*b) + } + return uuo +} + +// SetURI sets the "uri" field. +func (uuo *UserUpdateOne) SetURI(s string) *UserUpdateOne { + uuo.mutation.SetURI(s) + return uuo +} + +// SetNillableURI sets the "uri" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableURI(s *string) *UserUpdateOne { + if s != nil { + uuo.SetURI(*s) + } + return uuo +} + +// SetExtensions sets the "extensions" field. +func (uuo *UserUpdateOne) SetExtensions(l lysand.Extensions) *UserUpdateOne { + uuo.mutation.SetExtensions(l) + return uuo +} + +// SetUpdatedAt sets the "updated_at" field. +func (uuo *UserUpdateOne) SetUpdatedAt(t time.Time) *UserUpdateOne { + uuo.mutation.SetUpdatedAt(t) + return uuo +} + +// SetUsername sets the "username" field. +func (uuo *UserUpdateOne) SetUsername(s string) *UserUpdateOne { + uuo.mutation.SetUsername(s) + return uuo +} + +// SetNillableUsername sets the "username" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableUsername(s *string) *UserUpdateOne { + if s != nil { + uuo.SetUsername(*s) + } + return uuo +} + +// SetPasswordHash sets the "passwordHash" field. +func (uuo *UserUpdateOne) SetPasswordHash(b []byte) *UserUpdateOne { + uuo.mutation.SetPasswordHash(b) + return uuo +} + +// ClearPasswordHash clears the value of the "passwordHash" field. +func (uuo *UserUpdateOne) ClearPasswordHash() *UserUpdateOne { + uuo.mutation.ClearPasswordHash() + return uuo +} + +// SetDisplayName sets the "displayName" field. +func (uuo *UserUpdateOne) SetDisplayName(s string) *UserUpdateOne { + uuo.mutation.SetDisplayName(s) + return uuo +} + +// SetNillableDisplayName sets the "displayName" field if the given value is not nil. 
+func (uuo *UserUpdateOne) SetNillableDisplayName(s *string) *UserUpdateOne { + if s != nil { + uuo.SetDisplayName(*s) + } + return uuo +} + +// ClearDisplayName clears the value of the "displayName" field. +func (uuo *UserUpdateOne) ClearDisplayName() *UserUpdateOne { + uuo.mutation.ClearDisplayName() + return uuo +} + +// SetBiography sets the "biography" field. +func (uuo *UserUpdateOne) SetBiography(s string) *UserUpdateOne { + uuo.mutation.SetBiography(s) + return uuo +} + +// SetNillableBiography sets the "biography" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableBiography(s *string) *UserUpdateOne { + if s != nil { + uuo.SetBiography(*s) + } + return uuo +} + +// ClearBiography clears the value of the "biography" field. +func (uuo *UserUpdateOne) ClearBiography() *UserUpdateOne { + uuo.mutation.ClearBiography() + return uuo +} + +// SetPublicKey sets the "publicKey" field. +func (uuo *UserUpdateOne) SetPublicKey(ek ed25519.PublicKey) *UserUpdateOne { + uuo.mutation.SetPublicKey(ek) + return uuo +} + +// SetPrivateKey sets the "privateKey" field. +func (uuo *UserUpdateOne) SetPrivateKey(ek ed25519.PrivateKey) *UserUpdateOne { + uuo.mutation.SetPrivateKey(ek) + return uuo +} + +// ClearPrivateKey clears the value of the "privateKey" field. +func (uuo *UserUpdateOne) ClearPrivateKey() *UserUpdateOne { + uuo.mutation.ClearPrivateKey() + return uuo +} + +// SetIndexable sets the "indexable" field. +func (uuo *UserUpdateOne) SetIndexable(b bool) *UserUpdateOne { + uuo.mutation.SetIndexable(b) + return uuo +} + +// SetNillableIndexable sets the "indexable" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableIndexable(b *bool) *UserUpdateOne { + if b != nil { + uuo.SetIndexable(*b) + } + return uuo +} + +// SetPrivacyLevel sets the "privacyLevel" field. 
+func (uuo *UserUpdateOne) SetPrivacyLevel(ul user.PrivacyLevel) *UserUpdateOne { + uuo.mutation.SetPrivacyLevel(ul) + return uuo +} + +// SetNillablePrivacyLevel sets the "privacyLevel" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillablePrivacyLevel(ul *user.PrivacyLevel) *UserUpdateOne { + if ul != nil { + uuo.SetPrivacyLevel(*ul) + } + return uuo +} + +// SetFields sets the "fields" field. +func (uuo *UserUpdateOne) SetFields(l []lysand.Field) *UserUpdateOne { + uuo.mutation.SetFields(l) + return uuo +} + +// AppendFields appends l to the "fields" field. +func (uuo *UserUpdateOne) AppendFields(l []lysand.Field) *UserUpdateOne { + uuo.mutation.AppendFields(l) + return uuo +} + +// SetInbox sets the "inbox" field. +func (uuo *UserUpdateOne) SetInbox(s string) *UserUpdateOne { + uuo.mutation.SetInbox(s) + return uuo +} + +// SetNillableInbox sets the "inbox" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableInbox(s *string) *UserUpdateOne { + if s != nil { + uuo.SetInbox(*s) + } + return uuo +} + +// SetFeatured sets the "featured" field. +func (uuo *UserUpdateOne) SetFeatured(s string) *UserUpdateOne { + uuo.mutation.SetFeatured(s) + return uuo +} + +// SetNillableFeatured sets the "featured" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableFeatured(s *string) *UserUpdateOne { + if s != nil { + uuo.SetFeatured(*s) + } + return uuo +} + +// SetFollowers sets the "followers" field. +func (uuo *UserUpdateOne) SetFollowers(s string) *UserUpdateOne { + uuo.mutation.SetFollowers(s) + return uuo +} + +// SetNillableFollowers sets the "followers" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableFollowers(s *string) *UserUpdateOne { + if s != nil { + uuo.SetFollowers(*s) + } + return uuo +} + +// SetFollowing sets the "following" field. 
+func (uuo *UserUpdateOne) SetFollowing(s string) *UserUpdateOne { + uuo.mutation.SetFollowing(s) + return uuo +} + +// SetNillableFollowing sets the "following" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableFollowing(s *string) *UserUpdateOne { + if s != nil { + uuo.SetFollowing(*s) + } + return uuo +} + +// SetOutbox sets the "outbox" field. +func (uuo *UserUpdateOne) SetOutbox(s string) *UserUpdateOne { + uuo.mutation.SetOutbox(s) + return uuo +} + +// SetNillableOutbox sets the "outbox" field if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableOutbox(s *string) *UserUpdateOne { + if s != nil { + uuo.SetOutbox(*s) + } + return uuo +} + +// SetAvatarImageID sets the "avatarImage" edge to the Image entity by ID. +func (uuo *UserUpdateOne) SetAvatarImageID(id int) *UserUpdateOne { + uuo.mutation.SetAvatarImageID(id) + return uuo +} + +// SetNillableAvatarImageID sets the "avatarImage" edge to the Image entity by ID if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableAvatarImageID(id *int) *UserUpdateOne { + if id != nil { + uuo = uuo.SetAvatarImageID(*id) + } + return uuo +} + +// SetAvatarImage sets the "avatarImage" edge to the Image entity. +func (uuo *UserUpdateOne) SetAvatarImage(i *Image) *UserUpdateOne { + return uuo.SetAvatarImageID(i.ID) +} + +// SetHeaderImageID sets the "headerImage" edge to the Image entity by ID. +func (uuo *UserUpdateOne) SetHeaderImageID(id int) *UserUpdateOne { + uuo.mutation.SetHeaderImageID(id) + return uuo +} + +// SetNillableHeaderImageID sets the "headerImage" edge to the Image entity by ID if the given value is not nil. +func (uuo *UserUpdateOne) SetNillableHeaderImageID(id *int) *UserUpdateOne { + if id != nil { + uuo = uuo.SetHeaderImageID(*id) + } + return uuo +} + +// SetHeaderImage sets the "headerImage" edge to the Image entity. 
+func (uuo *UserUpdateOne) SetHeaderImage(i *Image) *UserUpdateOne { + return uuo.SetHeaderImageID(i.ID) +} + +// AddAuthoredNoteIDs adds the "authoredNotes" edge to the Note entity by IDs. +func (uuo *UserUpdateOne) AddAuthoredNoteIDs(ids ...uuid.UUID) *UserUpdateOne { + uuo.mutation.AddAuthoredNoteIDs(ids...) + return uuo +} + +// AddAuthoredNotes adds the "authoredNotes" edges to the Note entity. +func (uuo *UserUpdateOne) AddAuthoredNotes(n ...*Note) *UserUpdateOne { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uuo.AddAuthoredNoteIDs(ids...) +} + +// AddMentionedNoteIDs adds the "mentionedNotes" edge to the Note entity by IDs. +func (uuo *UserUpdateOne) AddMentionedNoteIDs(ids ...uuid.UUID) *UserUpdateOne { + uuo.mutation.AddMentionedNoteIDs(ids...) + return uuo +} + +// AddMentionedNotes adds the "mentionedNotes" edges to the Note entity. +func (uuo *UserUpdateOne) AddMentionedNotes(n ...*Note) *UserUpdateOne { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uuo.AddMentionedNoteIDs(ids...) +} + +// Mutation returns the UserMutation object of the builder. +func (uuo *UserUpdateOne) Mutation() *UserMutation { + return uuo.mutation +} + +// ClearAvatarImage clears the "avatarImage" edge to the Image entity. +func (uuo *UserUpdateOne) ClearAvatarImage() *UserUpdateOne { + uuo.mutation.ClearAvatarImage() + return uuo +} + +// ClearHeaderImage clears the "headerImage" edge to the Image entity. +func (uuo *UserUpdateOne) ClearHeaderImage() *UserUpdateOne { + uuo.mutation.ClearHeaderImage() + return uuo +} + +// ClearAuthoredNotes clears all "authoredNotes" edges to the Note entity. +func (uuo *UserUpdateOne) ClearAuthoredNotes() *UserUpdateOne { + uuo.mutation.ClearAuthoredNotes() + return uuo +} + +// RemoveAuthoredNoteIDs removes the "authoredNotes" edge to Note entities by IDs. 
+func (uuo *UserUpdateOne) RemoveAuthoredNoteIDs(ids ...uuid.UUID) *UserUpdateOne { + uuo.mutation.RemoveAuthoredNoteIDs(ids...) + return uuo +} + +// RemoveAuthoredNotes removes "authoredNotes" edges to Note entities. +func (uuo *UserUpdateOne) RemoveAuthoredNotes(n ...*Note) *UserUpdateOne { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uuo.RemoveAuthoredNoteIDs(ids...) +} + +// ClearMentionedNotes clears all "mentionedNotes" edges to the Note entity. +func (uuo *UserUpdateOne) ClearMentionedNotes() *UserUpdateOne { + uuo.mutation.ClearMentionedNotes() + return uuo +} + +// RemoveMentionedNoteIDs removes the "mentionedNotes" edge to Note entities by IDs. +func (uuo *UserUpdateOne) RemoveMentionedNoteIDs(ids ...uuid.UUID) *UserUpdateOne { + uuo.mutation.RemoveMentionedNoteIDs(ids...) + return uuo +} + +// RemoveMentionedNotes removes "mentionedNotes" edges to Note entities. +func (uuo *UserUpdateOne) RemoveMentionedNotes(n ...*Note) *UserUpdateOne { + ids := make([]uuid.UUID, len(n)) + for i := range n { + ids[i] = n[i].ID + } + return uuo.RemoveMentionedNoteIDs(ids...) +} + +// Where appends a list predicates to the UserUpdate builder. +func (uuo *UserUpdateOne) Where(ps ...predicate.User) *UserUpdateOne { + uuo.mutation.Where(ps...) + return uuo +} + +// Select allows selecting one or more fields (columns) of the returned entity. +// The default is selecting all fields defined in the entity schema. +func (uuo *UserUpdateOne) Select(field string, fields ...string) *UserUpdateOne { + uuo.fields = append([]string{field}, fields...) + return uuo +} + +// Save executes the query and returns the updated User entity. +func (uuo *UserUpdateOne) Save(ctx context.Context) (*User, error) { + uuo.defaults() + return withHooks(ctx, uuo.sqlSave, uuo.mutation, uuo.hooks) +} + +// SaveX is like Save, but panics if an error occurs. 
+func (uuo *UserUpdateOne) SaveX(ctx context.Context) *User { + node, err := uuo.Save(ctx) + if err != nil { + panic(err) + } + return node +} + +// Exec executes the query on the entity. +func (uuo *UserUpdateOne) Exec(ctx context.Context) error { + _, err := uuo.Save(ctx) + return err +} + +// ExecX is like Exec, but panics if an error occurs. +func (uuo *UserUpdateOne) ExecX(ctx context.Context) { + if err := uuo.Exec(ctx); err != nil { + panic(err) + } +} + +// defaults sets the default values of the builder before save. +func (uuo *UserUpdateOne) defaults() { + if _, ok := uuo.mutation.UpdatedAt(); !ok { + v := user.UpdateDefaultUpdatedAt() + uuo.mutation.SetUpdatedAt(v) + } +} + +// check runs all checks and user-defined validators on the builder. +func (uuo *UserUpdateOne) check() error { + if v, ok := uuo.mutation.URI(); ok { + if err := user.URIValidator(v); err != nil { + return &ValidationError{Name: "uri", err: fmt.Errorf(`ent: validator failed for field "User.uri": %w`, err)} + } + } + if v, ok := uuo.mutation.Username(); ok { + if err := user.UsernameValidator(v); err != nil { + return &ValidationError{Name: "username", err: fmt.Errorf(`ent: validator failed for field "User.username": %w`, err)} + } + } + if v, ok := uuo.mutation.DisplayName(); ok { + if err := user.DisplayNameValidator(v); err != nil { + return &ValidationError{Name: "displayName", err: fmt.Errorf(`ent: validator failed for field "User.displayName": %w`, err)} + } + } + if v, ok := uuo.mutation.PrivacyLevel(); ok { + if err := user.PrivacyLevelValidator(v); err != nil { + return &ValidationError{Name: "privacyLevel", err: fmt.Errorf(`ent: validator failed for field "User.privacyLevel": %w`, err)} + } + } + if v, ok := uuo.mutation.Inbox(); ok { + if err := user.InboxValidator(v); err != nil { + return &ValidationError{Name: "inbox", err: fmt.Errorf(`ent: validator failed for field "User.inbox": %w`, err)} + } + } + if v, ok := uuo.mutation.Featured(); ok { + if err := 
user.FeaturedValidator(v); err != nil { + return &ValidationError{Name: "featured", err: fmt.Errorf(`ent: validator failed for field "User.featured": %w`, err)} + } + } + if v, ok := uuo.mutation.Followers(); ok { + if err := user.FollowersValidator(v); err != nil { + return &ValidationError{Name: "followers", err: fmt.Errorf(`ent: validator failed for field "User.followers": %w`, err)} + } + } + if v, ok := uuo.mutation.Following(); ok { + if err := user.FollowingValidator(v); err != nil { + return &ValidationError{Name: "following", err: fmt.Errorf(`ent: validator failed for field "User.following": %w`, err)} + } + } + if v, ok := uuo.mutation.Outbox(); ok { + if err := user.OutboxValidator(v); err != nil { + return &ValidationError{Name: "outbox", err: fmt.Errorf(`ent: validator failed for field "User.outbox": %w`, err)} + } + } + return nil +} + +func (uuo *UserUpdateOne) sqlSave(ctx context.Context) (_node *User, err error) { + if err := uuo.check(); err != nil { + return _node, err + } + _spec := sqlgraph.NewUpdateSpec(user.Table, user.Columns, sqlgraph.NewFieldSpec(user.FieldID, field.TypeUUID)) + id, ok := uuo.mutation.ID() + if !ok { + return nil, &ValidationError{Name: "id", err: errors.New(`ent: missing "User.id" for update`)} + } + _spec.Node.ID.Value = id + if fields := uuo.fields; len(fields) > 0 { + _spec.Node.Columns = make([]string, 0, len(fields)) + _spec.Node.Columns = append(_spec.Node.Columns, user.FieldID) + for _, f := range fields { + if !user.ValidColumn(f) { + return nil, &ValidationError{Name: f, err: fmt.Errorf("ent: invalid field %q for query", f)} + } + if f != user.FieldID { + _spec.Node.Columns = append(_spec.Node.Columns, f) + } + } + } + if ps := uuo.mutation.predicates; len(ps) > 0 { + _spec.Predicate = func(selector *sql.Selector) { + for i := range ps { + ps[i](selector) + } + } + } + if value, ok := uuo.mutation.IsRemote(); ok { + _spec.SetField(user.FieldIsRemote, field.TypeBool, value) + } + if value, ok := 
uuo.mutation.URI(); ok { + _spec.SetField(user.FieldURI, field.TypeString, value) + } + if value, ok := uuo.mutation.Extensions(); ok { + _spec.SetField(user.FieldExtensions, field.TypeJSON, value) + } + if value, ok := uuo.mutation.UpdatedAt(); ok { + _spec.SetField(user.FieldUpdatedAt, field.TypeTime, value) + } + if value, ok := uuo.mutation.Username(); ok { + _spec.SetField(user.FieldUsername, field.TypeString, value) + } + if value, ok := uuo.mutation.PasswordHash(); ok { + _spec.SetField(user.FieldPasswordHash, field.TypeBytes, value) + } + if uuo.mutation.PasswordHashCleared() { + _spec.ClearField(user.FieldPasswordHash, field.TypeBytes) + } + if value, ok := uuo.mutation.DisplayName(); ok { + _spec.SetField(user.FieldDisplayName, field.TypeString, value) + } + if uuo.mutation.DisplayNameCleared() { + _spec.ClearField(user.FieldDisplayName, field.TypeString) + } + if value, ok := uuo.mutation.Biography(); ok { + _spec.SetField(user.FieldBiography, field.TypeString, value) + } + if uuo.mutation.BiographyCleared() { + _spec.ClearField(user.FieldBiography, field.TypeString) + } + if value, ok := uuo.mutation.PublicKey(); ok { + _spec.SetField(user.FieldPublicKey, field.TypeBytes, value) + } + if value, ok := uuo.mutation.PrivateKey(); ok { + _spec.SetField(user.FieldPrivateKey, field.TypeBytes, value) + } + if uuo.mutation.PrivateKeyCleared() { + _spec.ClearField(user.FieldPrivateKey, field.TypeBytes) + } + if value, ok := uuo.mutation.Indexable(); ok { + _spec.SetField(user.FieldIndexable, field.TypeBool, value) + } + if value, ok := uuo.mutation.PrivacyLevel(); ok { + _spec.SetField(user.FieldPrivacyLevel, field.TypeEnum, value) + } + if value, ok := uuo.mutation.GetFields(); ok { + _spec.SetField(user.FieldFields, field.TypeJSON, value) + } + if value, ok := uuo.mutation.AppendedFields(); ok { + _spec.AddModifier(func(u *sql.UpdateBuilder) { + sqljson.Append(u, user.FieldFields, value) + }) + } + if value, ok := uuo.mutation.Inbox(); ok { + 
_spec.SetField(user.FieldInbox, field.TypeString, value) + } + if value, ok := uuo.mutation.Featured(); ok { + _spec.SetField(user.FieldFeatured, field.TypeString, value) + } + if value, ok := uuo.mutation.Followers(); ok { + _spec.SetField(user.FieldFollowers, field.TypeString, value) + } + if value, ok := uuo.mutation.Following(); ok { + _spec.SetField(user.FieldFollowing, field.TypeString, value) + } + if value, ok := uuo.mutation.Outbox(); ok { + _spec.SetField(user.FieldOutbox, field.TypeString, value) + } + if uuo.mutation.AvatarImageCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: user.AvatarImageTable, + Columns: []string{user.AvatarImageColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.AvatarImageIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: user.AvatarImageTable, + Columns: []string{user.AvatarImageColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if uuo.mutation.HeaderImageCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: user.HeaderImageTable, + Columns: []string{user.HeaderImageColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.HeaderImageIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2O, + Inverse: false, + Table: user.HeaderImageTable, + Columns: []string{user.HeaderImageColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: 
sqlgraph.NewFieldSpec(image.FieldID, field.TypeInt), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if uuo.mutation.AuthoredNotesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: user.AuthoredNotesTable, + Columns: []string{user.AuthoredNotesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.RemovedAuthoredNotesIDs(); len(nodes) > 0 && !uuo.mutation.AuthoredNotesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: user.AuthoredNotesTable, + Columns: []string{user.AuthoredNotesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.AuthoredNotesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.O2M, + Inverse: true, + Table: user.AuthoredNotesTable, + Columns: []string{user.AuthoredNotesColumn}, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + if uuo.mutation.MentionedNotesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: user.MentionedNotesTable, + Columns: user.MentionedNotesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID), + }, + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.RemovedMentionedNotesIDs(); len(nodes) > 0 && 
!uuo.mutation.MentionedNotesCleared() { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: user.MentionedNotesTable, + Columns: user.MentionedNotesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Clear = append(_spec.Edges.Clear, edge) + } + if nodes := uuo.mutation.MentionedNotesIDs(); len(nodes) > 0 { + edge := &sqlgraph.EdgeSpec{ + Rel: sqlgraph.M2M, + Inverse: true, + Table: user.MentionedNotesTable, + Columns: user.MentionedNotesPrimaryKey, + Bidi: false, + Target: &sqlgraph.EdgeTarget{ + IDSpec: sqlgraph.NewFieldSpec(note.FieldID, field.TypeUUID), + }, + } + for _, k := range nodes { + edge.Target.Nodes = append(edge.Target.Nodes, k) + } + _spec.Edges.Add = append(_spec.Edges.Add, edge) + } + _node = &User{config: uuo.config} + _spec.Assign = _node.assignValues + _spec.ScanValues = _node.scanValues + if err = sqlgraph.UpdateNode(ctx, uuo.driver, _spec); err != nil { + if _, ok := err.(*sqlgraph.NotFoundError); ok { + err = &NotFoundError{user.Label} + } else if sqlgraph.IsConstraintError(err) { + err = &ConstraintError{msg: err.Error(), wrap: err} + } + return nil, err + } + uuo.mutation.done = true + return _node, nil +} diff --git a/entsqlite.go b/entsqlite.go new file mode 100644 index 0000000..9d732e2 --- /dev/null +++ b/entsqlite.go @@ -0,0 +1,29 @@ +package main + +import ( + "database/sql/driver" + + "github.com/pkg/errors" + "github.com/rs/zerolog/log" + "modernc.org/sqlite" +) + +type sqliteDriver struct { + *sqlite.Driver +} + +func (d sqliteDriver) Open(name string) (driver.Conn, error) { + log.Trace().Str("name", name).Msg("Opening SQLite connection") + conn, err := d.Driver.Open(name) + if err != nil { + return conn, err + } + c := conn.(interface { + Exec(stmt string, args []driver.Value) (driver.Result, error) + }) + if _, err := 
c.Exec("PRAGMA foreign_keys = on;", nil); err != nil { + conn.Close() + return nil, errors.Wrap(err, "failed to enable foreign keys") + } + return conn, nil +} diff --git a/fiber_error_handler.go b/fiber_error_handler.go new file mode 100644 index 0000000..5397cd7 --- /dev/null +++ b/fiber_error_handler.go @@ -0,0 +1,32 @@ +package main + +import ( + "errors" + + "git.devminer.xyz/devminer/unitel" + "github.com/gofiber/fiber/v2" + "github.com/lysand-org/versia-go/internal/api_schema" + "github.com/rs/zerolog/log" +) + +func fiberErrorHandler(c *fiber.Ctx, err error) error { + var fiberErr *fiber.Error + var apiErr *api_schema.APIError + + if errors.As(err, &fiberErr) { + apiErr = api_schema.NewAPIError(fiberErr.Code, fiberErr.Error())(nil) + } else if errors.As(err, &apiErr) { + log.Error().Err(apiErr).Msg("API error") + } else { + if hub := unitel.GetHubFromFiberContext(c); hub != nil { + hub.CaptureException(err) + } + + log.Error().Err(err).Msg("Unhandled error") + apiErr = api_schema.NewAPIError(fiber.StatusInternalServerError, "Internal Server Error")(nil) + } + + log.Error().Err(apiErr).Msg("Unhandled error") + + return c.Status(apiErr.StatusCode).JSON(api_schema.NewFailedAPIResponse[any](apiErr)) +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..54a9ab1 --- /dev/null +++ b/go.mod @@ -0,0 +1,91 @@ +module github.com/lysand-org/versia-go + +go 1.22.5 + +require ( + entgo.io/ent v0.13.1 + git.devminer.xyz/devminer/unitel v0.0.0-20240811005427-261161b9118d + github.com/Masterminds/semver v1.5.0 + github.com/go-logr/logr v1.4.2 + github.com/go-logr/zerologr v1.2.3 + github.com/gofiber/fiber/v2 v2.52.5 + github.com/google/uuid v1.6.0 + github.com/jackc/pgx/v5 v5.6.0 + github.com/pkg/errors v0.9.1 + github.com/rs/zerolog v1.33.0 + github.com/stretchr/testify v1.9.0 + github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb + github.com/valyala/fasthttp v1.55.0 + modernc.org/sqlite v1.31.1 +) + +require ( + ariga.io/atlas 
v0.19.1-0.20240203083654-5948b60a8e43 // indirect + github.com/agext/levenshtein v1.2.1 // indirect + github.com/andybalholm/brotli v1.1.0 // indirect + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/dustin/go-humanize v1.0.1 // indirect + github.com/gabriel-vasile/mimetype v1.4.3 // indirect + github.com/getsentry/sentry-go v0.28.1 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-openapi/inflect v0.19.0 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/go-playground/validator/v10 v10.22.0 // indirect + github.com/google/go-cmp v0.6.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.21.0 // indirect + github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect + github.com/hashicorp/hcl/v2 v2.13.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/puddle/v2 v2.2.1 // indirect + github.com/joho/godotenv v1.5.1 // indirect + github.com/klauspost/compress v1.17.9 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/mattn/go-runewidth v0.0.16 // indirect + github.com/mitchellh/go-wordwrap v1.0.1 // indirect + github.com/nats-io/nats.go v1.36.0 // indirect + github.com/nats-io/nkeys v0.4.7 // indirect + github.com/nats-io/nuid v1.0.1 // indirect + github.com/ncruces/go-strftime v0.1.9 // indirect + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/qustavo/sqlhooks/v2 v2.1.0 // indirect + github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect + github.com/rivo/uniseg v0.4.7 // indirect + github.com/valyala/bytebufferpool v1.0.0 // indirect + 
github.com/valyala/tcplisten v1.0.0 // indirect + github.com/zclconf/go-cty v1.8.0 // indirect + go.opentelemetry.io/otel v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0 // indirect + go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 // indirect + go.opentelemetry.io/otel/metric v1.28.0 // indirect + go.opentelemetry.io/otel/sdk v1.28.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.28.0 // indirect + go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.opentelemetry.io/proto/otlp v1.3.1 // indirect + golang.org/x/crypto v0.26.0 // indirect + golang.org/x/mod v0.19.0 // indirect + golang.org/x/net v0.28.0 // indirect + golang.org/x/sync v0.8.0 // indirect + golang.org/x/sys v0.24.0 // indirect + golang.org/x/text v0.17.0 // indirect + golang.org/x/tools v0.23.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240808171019-573a1156607a // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240808171019-573a1156607a // indirect + google.golang.org/grpc v1.65.0 // indirect + google.golang.org/protobuf v1.34.2 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect + modernc.org/libc v1.55.3 // indirect + modernc.org/mathutil v1.6.0 // indirect + modernc.org/memory v1.8.0 // indirect + modernc.org/strutil v1.2.0 // indirect + modernc.org/token v1.1.0 // indirect +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..fe1c232 --- /dev/null +++ b/go.sum @@ -0,0 +1,271 @@ +ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43 h1:GwdJbXydHCYPedeeLt4x/lrlIISQ4JTH1mRWuE5ZZ14= +ariga.io/atlas v0.19.1-0.20240203083654-5948b60a8e43/go.mod 
h1:uj3pm+hUTVN/X5yfdBexHlZv+1Xu5u5ZbZx7+CDavNU= +entgo.io/ent v0.13.1 h1:uD8QwN1h6SNphdCCzmkMN3feSUzNnVvV/WIkHKMbzOE= +entgo.io/ent v0.13.1/go.mod h1:qCEmo+biw3ccBn9OyL4ZK5dfpwg++l1Gxwac5B1206A= +git.devminer.xyz/devminer/unitel v0.0.0-20240731151502-6155fc58527e h1:D/evc8i/qVOI4aFB36zIksWjYto5ZJKhzdHP0FFQn8o= +git.devminer.xyz/devminer/unitel v0.0.0-20240731151502-6155fc58527e/go.mod h1:+aDah0giu/eSjnr/pTNC+g6CTPyf4lttIIewNQ+1HaQ= +git.devminer.xyz/devminer/unitel v0.0.0-20240731195827-7274c9b9631b h1:ky/+3Zyhpe7KOPuSKfmHcl7CWZUrEIxKPPKdmo1mjOY= +git.devminer.xyz/devminer/unitel v0.0.0-20240731195827-7274c9b9631b/go.mod h1:+aDah0giu/eSjnr/pTNC+g6CTPyf4lttIIewNQ+1HaQ= +git.devminer.xyz/devminer/unitel v0.0.0-20240810000007-3c254e87680c h1:iV8aKmhDzDEJMpCmidGfJ9v4++yDMAooGyMoCzJII8c= +git.devminer.xyz/devminer/unitel v0.0.0-20240810000007-3c254e87680c/go.mod h1:+aDah0giu/eSjnr/pTNC+g6CTPyf4lttIIewNQ+1HaQ= +git.devminer.xyz/devminer/unitel v0.0.0-20240811005427-261161b9118d h1:J+dcb2+nok6Ps10buowDOM6kA1/PUu0Mmcp5VMhTrho= +git.devminer.xyz/devminer/unitel v0.0.0-20240811005427-261161b9118d/go.mod h1:+aDah0giu/eSjnr/pTNC+g6CTPyf4lttIIewNQ+1HaQ= +github.com/DATA-DOG/go-sqlmock v1.5.0 h1:Shsta01QNfFxHCfpW6YH2STWB0MudeXXEWMr20OEh60= +github.com/DATA-DOG/go-sqlmock v1.5.0/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM= +github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww= +github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/agext/levenshtein v1.2.1 h1:QmvMAjj2aEICytGiWzmxoE0x2KZvE0fvmqMOfy2tjT8= +github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558= +github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M= +github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= 
+github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= +github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= +github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY= +github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto= +github.com/gabriel-vasile/mimetype v1.4.3 h1:in2uUcidCuFcDKtdcBxlR0rJ1+fsokWf+uqxgUFjbI0= +github.com/gabriel-vasile/mimetype v1.4.3/go.mod h1:d8uq/6HKRL6CGdk+aubisF/M5GcPfT7nKyLpA0lbSSk= +github.com/getsentry/sentry-go v0.28.1 h1:zzaSm/vHmGllRM6Tpx1492r0YDzauArdBfkJRtY6P5k= +github.com/getsentry/sentry-go v0.28.1/go.mod h1:1fQZ+7l7eeJ3wYi82q5Hg8GqAPgefRq+FP/QhafYVgg= +github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA= +github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-logr/zerologr v1.2.3 h1:up5N9vcH9Xck3jJkXzgyOxozT14R47IyDODz8LM1KSs= +github.com/go-logr/zerologr v1.2.3/go.mod h1:BxwGo7y5zgSHYR1BjbnHPyF/5ZjVKfKxAZANVu6E8Ho= +github.com/go-openapi/inflect 
v0.19.0 h1:9jCH9scKIbHeV9m12SmPilScz6krDxKRasNNSNPXu/4= +github.com/go-openapi/inflect v0.19.0/go.mod h1:lHpZVlpIQqLyKwJ4N+YSc9hchQy/i12fJykb83CRBH4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.22.0 h1:k6HsTZ0sTnROkhS//R0O+55JgM8C4Bx7ia+JlgcnOao= +github.com/go-playground/validator/v10 v10.22.0/go.mod h1:dbuPbCMFw/DrkbEynArYaCwl3amGuJotoKCe95atGMM= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68= +github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gofiber/fiber/v2 v2.52.5 h1:tWoP1MJQjGEe4GB5TUGOi7P2E0ZMMRx5ZTG4rT+yGMo= +github.com/gofiber/fiber/v2 v2.52.5/go.mod h1:KEOE+cXMhXG0zHc9d8+E38hoX+ZN7bhOtgeF2oT6jrQ= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo= +github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod 
h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.21.0 h1:CWyXh/jylQWp2dtiV33mY4iSSp6yf4lmn+c7/tN+ObI= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.21.0/go.mod h1:nCLIt0w3Ept2NwF8ThLmrppXsfT07oC8k0XNDxd8sVU= +github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k= +github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM= +github.com/hashicorp/hcl/v2 v2.13.0 h1:0Apadu1w6M11dyGFxWnmhhcMjkbAiKCv7G1r/2QgCNc= +github.com/hashicorp/hcl/v2 v2.13.0/go.mod h1:e4z5nxYlWNPdDSNYX+ph14EvWYMFm3eP0zIUqPc2jr0= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= +github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= +github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= +github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0= +github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4= +github.com/klauspost/compress v1.17.9 h1:6KIumPrER1LHsvBVuDa0r5xaG0Es51mhhB9BQB2qeMA= +github.com/klauspost/compress v1.17.9/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 
h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= +github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= +github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc= +github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/mitchellh/go-wordwrap v1.0.1 
h1:TLuKupo69TCn6TQSyGxwI1EblZZEsQ0vMlAFQflz0v0= +github.com/mitchellh/go-wordwrap v1.0.1/go.mod h1:R62XHJLzvMFRBbcrT7m7WgmE1eOyTSsCt+hzestvNj0= +github.com/nats-io/nats.go v1.36.0 h1:suEUPuWzTSse/XhESwqLxXGuj8vGRuPRoG7MoRN/qyU= +github.com/nats-io/nats.go v1.36.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8= +github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI= +github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc= +github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= +github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= +github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4= +github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls= +github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec= +github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= +github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= +github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/qustavo/sqlhooks/v2 v2.1.0 h1:54yBemHnGHp/7xgT+pxwmIlMSDNYKx5JW5dfRAiCZi0= +github.com/qustavo/sqlhooks/v2 v2.1.0/go.mod h1:aMREyKo7fOKTwiLuWPsaHRXEmtqG4yREztO0idF83AU= +github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE= +github.com/remyoudompheng/bigfft 
v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo= +github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= +github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= +github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= +github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= +github.com/rs/zerolog v1.33.0 h1:1cU2KZkvPxNyfgEmhHAz/1A9Bz+llsdYzklWFzgp0r8= +github.com/rs/zerolog v1.33.0/go.mod h1:/7mN4D5sKwJLZQ2b/znpjC3/GQWY/xaDXUM0kKWRHss= +github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ= +github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= +github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= +github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb h1:Ywfo8sUltxogBpFuMOFRrrSifO788kAFxmvVw31PtQQ= +github.com/ucarion/urlpath v0.0.0-20200424170820-7ccc79b76bbb/go.mod 
h1:ikPs9bRWicNw3S7XpJ8sK/smGwU9WcSVU3dy9qahYBM= +github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= +github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= +github.com/valyala/fasthttp v1.55.0 h1:Zkefzgt6a7+bVKHnu/YaYSOPfNYNisSVBo/unVCf8k8= +github.com/valyala/fasthttp v1.55.0/go.mod h1:NkY9JtkrpPKmgwV3HTaS2HWaJss9RSIsRVfcxxoHiOM= +github.com/valyala/tcplisten v1.0.0 h1:rBHj/Xf+E1tRGZyWIWwJDiRY0zc1Js+CV5DqwacVSA8= +github.com/valyala/tcplisten v1.0.0/go.mod h1:T0xQ8SeCZGxckz9qRXTfG43PvQ/mcWh7FwZEA7Ioqkc= +github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= +github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +github.com/zclconf/go-cty v1.8.0 h1:s4AvqaeQzJIu3ndv4gVIhplVD0krU+bgrcLSVUnaWuA= +github.com/zclconf/go-cty v1.8.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= +go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo= +go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk= +go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= +go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0 h1:BJee2iLkfRfl9lc7aFmBwkWxY/RI1RDdXepSF6y8TPE= 
+go.opentelemetry.io/otel/exporters/stdout/stdoutmetric v1.28.0/go.mod h1:DIzlHs3DRscCIBU3Y9YSzPfScwnYnzfnCd4g8zA7bZc= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0 h1:EVSnY9JbEEW92bEkIYOVMw4q1WJxIAGoFTrtYOzWuRQ= +go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.28.0/go.mod h1:Ea1N1QQryNXpCD0I1fdLibBAIpQuBkznMmkdKrapk1Y= +go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q= +go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s= +go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE= +go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg= +go.opentelemetry.io/otel/sdk/metric v1.28.0 h1:OkuaKgKrgAbYrrY0t92c+cC+2F6hsFNnCQArXCKlg08= +go.opentelemetry.io/otel/sdk/metric v1.28.0/go.mod h1:cWPjykihLAPvXKi4iZc1dpER3Jdq2Z0YLse3moQUCpg= +go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g= +go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= +go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0= +go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.25.0 h1:ypSNr+bnYL2YhwoMt2zPxHFmbAN1KZs/njMG3hxUp30= +golang.org/x/crypto v0.25.0/go.mod h1:T+wALwcMOSE0kXgUAnPAHqTLW+XHgcELELW8VaDgm/M= +golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= +golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod 
h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.27.0 h1:5K3Njcw06/l2y9vpGCSdcxWOYHOUk3dVNGDXN+FvAys= +golang.org/x/net v0.27.0/go.mod h1:dDi0PyhWNoiUOrAS8uXv/vnScO4wnHQO4mj9fn/RytE= +golang.org/x/net v0.28.0 h1:a9JDOJc5GMUJ0+UDqmLT86WiEy7iWyIhz8gz8E4e5hE= +golang.org/x/net v0.28.0/go.mod h1:yqtgsTWOOnlGLG9GFRrK3++bGOUEkNBoHZc8MEDWPNg= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.24.0 h1:Twjiwq9dn6R1fQcyiK+wQyHWfaz/BJB+YIpzU/Cv3Xg= +golang.org/x/sys v0.24.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= +golang.org/x/text v0.17.0 
h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= +golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.23.0 h1:SGsXPZ+2l4JsgaCKkx+FQ9YZ5XEtA1GZYuoDjenLjvg= +golang.org/x/tools v0.23.0/go.mod h1:pnu6ufv6vQkll6szChhK3C3L/ruaIv5eBeztNG8wtsI= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto/googleapis/api v0.0.0-20240730163845-b1a4ccb954bf h1:GillM0Ef0pkZPIB+5iO6SDK+4T9pf6TpaYR6ICD5rVE= +google.golang.org/genproto/googleapis/api v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:OFMYQFHJ4TM3JRlWDZhJbZfra2uqc3WLBZiaaqP4DtU= +google.golang.org/genproto/googleapis/api v0.0.0-20240808171019-573a1156607a h1:KyUe15n7B1YCu+kMmPtlXxgkLQbp+Dw0tCRZf9Sd+CE= +google.golang.org/genproto/googleapis/api v0.0.0-20240808171019-573a1156607a/go.mod h1:4+X6GvPs+25wZKbQq9qyAXrwIRExv7w0Ea6MgZLZiDM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf h1:liao9UHurZLtiEwBgT9LMOnKYsHze6eA6w1KQCMVN2Q= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240730163845-b1a4ccb954bf/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240808171019-573a1156607a h1:EKiZZXueP9/T68B8Nl0GAx9cjbQnCId0yP3qPMgaaHs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240808171019-573a1156607a/go.mod h1:Ue6ibwXGpU+dqIcODieyLOcgj7z8+IcskoNIgZxtrFY= +google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc= +google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ= +google.golang.org/protobuf v1.34.2 h1:6xV6lTsCfpGD21XK49h7MhtcApnLqkfYgPcdHftf6hg= +google.golang.org/protobuf v1.34.2/go.mod h1:qYOHts0dSfpeUzUFpOMr/WGzszTmLH+DiWniOlNbLDw= +gopkg.in/check.v1 
v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +modernc.org/cc/v4 v4.21.4 h1:3Be/Rdo1fpr8GrQ7IVw9OHtplU4gWbb+wNgeoBMmGLQ= +modernc.org/cc/v4 v4.21.4/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ= +modernc.org/ccgo/v4 v4.19.2 h1:lwQZgvboKD0jBwdaeVCTouxhxAyN6iawF3STraAal8Y= +modernc.org/ccgo/v4 v4.19.2/go.mod h1:ysS3mxiMV38XGRTTcgo0DQTeTmAO4oCmJl1nX9VFI3s= +modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE= +modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ= +modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw= +modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU= +modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI= +modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4= +modernc.org/libc v1.55.3 h1:AzcW1mhlPNrRtjS5sS+eW2ISCgSOLLNyFzRh/V3Qj/U= +modernc.org/libc v1.55.3/go.mod h1:qFXepLhz+JjFThQ4kzwzOjA/y/artDeg+pcYnY+Q83w= +modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4= +modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo= +modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E= 
+modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU= +modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4= +modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0= +modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc= +modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss= +modernc.org/sqlite v1.31.1 h1:XVU0VyzxrYHlBhIs1DiEgSl0ZtdnPtbLVy8hSkzxGrs= +modernc.org/sqlite v1.31.1/go.mod h1:UqoylwmTb9F+IqXERT8bW9zzOWN8qwAIcLdzeBZs4hA= +modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA= +modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0= +modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y= +modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM= diff --git a/internal/api_schema/api.go b/internal/api_schema/api.go new file mode 100644 index 0000000..f210b0a --- /dev/null +++ b/internal/api_schema/api.go @@ -0,0 +1,86 @@ +package api_schema + +import ( + "encoding/json" + "errors" + "fmt" + "net/url" + "strings" +) + +type APIError struct { + StatusCode int `json:"status_code"` + Description string `json:"description"` + Metadata map[string]any `json:"metadata,omitempty"` +} + +func (e APIError) Error() string { + if e.Metadata == nil || len(e.Metadata) == 0 { + return fmt.Sprintf("APIError: %d - %s", e.StatusCode, e.Description) + } + + return fmt.Sprintf("APIError: %d - %s, %s", e.StatusCode, e.Description, stringifyErrorMetadata(e.Metadata)) +} + +func stringifyErrorMetadata(m map[string]any) string { + sb := strings.Builder{} + for key, value := range m { + sb.WriteString(fmt.Sprintf("%s=%v, ", key, value)) + } + return strings.TrimSuffix(sb.String(), ", ") +} + +func (e APIError) Equals(other any) bool { + var err *APIError + + switch raw := other.(type) { + case *APIError: + err = raw + default: + return false + } + + return e.StatusCode == 
err.StatusCode && e.Description == err.Description +} + +func (e APIError) URLEncode() (string, error) { + v := url.Values{} + v.Set("status_code", fmt.Sprintf("%d", e.StatusCode)) + v.Set("description", e.Description) + + if e.Metadata != nil { + b, err := json.Marshal(e.Metadata) + if err != nil { + return "", err + } + + v.Set("metadata", string(b)) + } + + // Fix up spaces because Golang's net/url.URL encodes " " as "+" instead of "%20" + // https://github.com/golang/go/issues/13982 + return strings.ReplaceAll(v.Encode(), "+", "%20"), nil +} + +func NewAPIError(code int, description string) func(metadata map[string]any) *APIError { + return func(metadata map[string]any) *APIError { + return &APIError{StatusCode: code, Description: description, Metadata: metadata} + } +} + +type APIResponse[T any] struct { + Ok bool `json:"ok"` + Data *T `json:"data"` + Error *APIError `json:"error"` +} + +func NewFailedAPIResponse[T any](err error) APIResponse[T] { + var e *APIError + + if errors.As(err, &e) { + } else { + e = NewAPIError(500, err.Error())(nil) + } + + return APIResponse[T]{Ok: false, Error: e} +} diff --git a/internal/api_schema/errors.go b/internal/api_schema/errors.go new file mode 100644 index 0000000..5c469e3 --- /dev/null +++ b/internal/api_schema/errors.go @@ -0,0 +1,15 @@ +package api_schema + +var ( + ErrBadRequest = NewAPIError(400, "Bad request") + ErrInvalidRequestBody = NewAPIError(400, "Invalid request body") + ErrUnauthorized = NewAPIError(401, "Unauthorized") + ErrForbidden = NewAPIError(403, "Forbidden") + ErrNotFound = NewAPIError(404, "Not found") + ErrConflict = NewAPIError(409, "Conflict") + ErrUsernameTaken = NewAPIError(409, "Username is taken") + ErrRateLimitExceeded = NewAPIError(429, "Rate limit exceeded") + + ErrInternalServerError = NewAPIError(500, "Internal server error") + ErrNotImplemented = NewAPIError(501, "Not implemented") +) diff --git a/internal/api_schema/notes.go b/internal/api_schema/notes.go new file mode 100644 index 
0000000..6dabc71 --- /dev/null +++ b/internal/api_schema/notes.go @@ -0,0 +1,18 @@ +package api_schema + +import ( + "github.com/google/uuid" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +type Note struct { + ID uuid.UUID `json:"id,string"` +} + +type FetchNoteResponse = APIResponse[Note] + +type CreateNoteRequest struct { + Content string `json:"content" validate:"required,min=1,max=1024"` + Visibility lysand.PublicationVisibility `json:"visibility" validate:"required,oneof=public private direct"` + Mentions []lysand.URL `json:"mentions"` +} diff --git a/internal/api_schema/users.go b/internal/api_schema/users.go new file mode 100644 index 0000000..ad9c4e2 --- /dev/null +++ b/internal/api_schema/users.go @@ -0,0 +1,17 @@ +package api_schema + +import ( + "github.com/google/uuid" +) + +type User struct { + ID uuid.UUID `json:"id,string"` + Username string `json:"username"` +} + +type FetchUserResponse = APIResponse[User] + +type CreateUserRequest struct { + Username string `json:"username" validate:"required,username_regex,min=3,max=32"` + Password string `json:"password" validate:"required,min=8,max=256"` +} diff --git a/internal/database/transaction.go b/internal/database/transaction.go new file mode 100644 index 0000000..ca0256b --- /dev/null +++ b/internal/database/transaction.go @@ -0,0 +1,85 @@ +package database + +import ( + "context" + "sync" + + "git.devminer.xyz/devminer/unitel" + "github.com/lysand-org/versia-go/ent" +) + +func BeginTx(ctx context.Context, db *ent.Client, telemetry *unitel.Telemetry) (*Tx, error) { + span := telemetry.StartSpan(ctx, "db.sql.transaction", "BeginTx") + ctx = span.Context() + + tx, err := db.Tx(ctx) + if err != nil { + return nil, err + } + + return newTx(tx, ctx, span), nil +} + +type TxAction uint8 + +const ( + TxActionRollback TxAction = iota + TxActionCommit +) + +type Tx struct { + *ent.Tx + ctx context.Context + span *unitel.Span + + m sync.Mutex + action TxAction + + finishOnce func() error +} + +func newTx(tx 
*ent.Tx, ctx context.Context, span *unitel.Span) *Tx { + t := &Tx{ + Tx: tx, + ctx: ctx, + span: span, + } + + t.finishOnce = sync.OnceValue(t.finish) + + return t +} + +func (t *Tx) MarkForCommit() { + t.m.Lock() + defer t.m.Unlock() + + t.action = TxActionCommit +} + +func (t *Tx) finish() error { + t.m.Lock() + defer t.m.Unlock() + defer t.span.End() + + var err error + switch t.action { + case TxActionCommit: + err = t.Tx.Commit() + case TxActionRollback: + err = t.Tx.Rollback() + } + if err != nil { + t.span.CaptureError(err) + } + + return err +} + +func (t *Tx) Context() context.Context { + return t.ctx +} + +func (t *Tx) Finish() error { + return t.finishOnce() +} diff --git a/internal/entity/follow.go b/internal/entity/follow.go new file mode 100644 index 0000000..e8d71ff --- /dev/null +++ b/internal/entity/follow.go @@ -0,0 +1,76 @@ +package entity + +import ( + "github.com/lysand-org/versia-go/ent" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +type Follow struct { + *ent.Follow + + URI *lysand.URL + FollowerURI *lysand.URL + FolloweeURI *lysand.URL +} + +func NewFollow(dbFollow *ent.Follow) (*Follow, error) { + f := &Follow{Follow: dbFollow} + + var err error + + f.URI, err = lysand.ParseURL(dbFollow.URI) + if err != nil { + return nil, err + } + + f.FollowerURI, err = lysand.ParseURL(dbFollow.Edges.Follower.URI) + if err != nil { + return nil, err + } + + f.FolloweeURI, err = lysand.ParseURL(dbFollow.Edges.Followee.URI) + if err != nil { + return nil, err + } + + return f, nil +} + +func (f Follow) ToLysand() *lysand.Follow { + return &lysand.Follow{ + Entity: lysand.Entity{ + ID: f.ID, + URI: f.URI, + CreatedAt: lysand.TimeFromStd(f.CreatedAt), + Extensions: f.Extensions, + }, + Author: f.FollowerURI, + Followee: f.FolloweeURI, + } +} + +func (f Follow) ToLysandAccept() *lysand.FollowAccept { + return &lysand.FollowAccept{ + Entity: lysand.Entity{ + ID: f.ID, + URI: f.URI, + CreatedAt: lysand.TimeFromStd(f.CreatedAt), + Extensions: f.Extensions, 
+ }, + Author: f.FolloweeURI, + Follower: f.FollowerURI, + } +} + +func (f Follow) ToLysandReject() *lysand.FollowReject { + return &lysand.FollowReject{ + Entity: lysand.Entity{ + ID: f.ID, + URI: f.URI, + CreatedAt: lysand.TimeFromStd(f.CreatedAt), + Extensions: f.Extensions, + }, + Author: f.FolloweeURI, + Follower: f.FollowerURI, + } +} diff --git a/internal/entity/note.go b/internal/entity/note.go new file mode 100644 index 0000000..74919e3 --- /dev/null +++ b/internal/entity/note.go @@ -0,0 +1,70 @@ +package entity + +import ( + "github.com/lysand-org/versia-go/ent" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +type Note struct { + *ent.Note + URI *lysand.URL + Content lysand.TextContentTypeMap + Author *User + Mentions []User + MentionURIs []lysand.URL +} + +func NewNote(dbNote *ent.Note) (*Note, error) { + n := &Note{ + Note: dbNote, + Content: lysand.TextContentTypeMap{ + "text/plain": lysand.TextContent{Content: dbNote.Content}, + }, + Mentions: make([]User, 0, len(dbNote.Edges.Mentions)), + MentionURIs: make([]lysand.URL, 0, len(dbNote.Edges.Mentions)), + } + + var err error + if n.URI, err = lysand.ParseURL(dbNote.URI); err != nil { + return nil, err + } + if n.Author, err = NewUser(dbNote.Edges.Author); err != nil { + return nil, err + } + + for _, m := range dbNote.Edges.Mentions { + u, err := NewUser(m) + if err != nil { + return nil, err + } + + n.Mentions = append(n.Mentions, *u) + n.MentionURIs = append(n.MentionURIs, *u.URI) + } + + return n, nil +} + +func (n Note) ToLysand() lysand.Note { + return lysand.Note{ + Entity: lysand.Entity{ + ID: n.ID, + URI: n.URI, + CreatedAt: lysand.TimeFromStd(n.CreatedAt), + Extensions: n.Extensions, + }, + Author: n.Author.URI, + Content: n.Content, + Category: nil, + Device: nil, + Previews: nil, + Group: nil, + Attachments: nil, + RepliesTo: nil, + Quoting: nil, + Mentions: n.MentionURIs, + Subject: n.Subject, + IsSensitive: &n.IsSensitive, + Visibility: lysand.PublicationVisibility(n.Visibility), + } 
+} diff --git a/internal/entity/user.go b/internal/entity/user.go new file mode 100644 index 0000000..d44f52d --- /dev/null +++ b/internal/entity/user.go @@ -0,0 +1,139 @@ +package entity + +import ( + "github.com/lysand-org/versia-go/internal/helpers" + "net/url" + + "github.com/lysand-org/versia-go/ent" + "github.com/lysand-org/versia-go/internal/utils" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +type User struct { + *ent.User + + URI *lysand.URL + Inbox *lysand.URL + Outbox *lysand.URL + Featured *lysand.URL + Followers *lysand.URL + Following *lysand.URL + + DisplayName string + LysandAvatar lysand.ImageContentTypeMap + LysandBiography lysand.TextContentTypeMap + Signer lysand.Signer +} + +func NewUser(dbUser *ent.User) (*User, error) { + u := &User{User: dbUser} + + u.DisplayName = u.Username + if dbUser.DisplayName != nil { + u.DisplayName = *dbUser.DisplayName + } + + var err error + if u.URI, err = lysand.ParseURL(dbUser.URI); err != nil { + return nil, err + } + if u.Inbox, err = lysand.ParseURL(dbUser.Inbox); err != nil { + return nil, err + } + if u.Outbox, err = lysand.ParseURL(dbUser.Outbox); err != nil { + return nil, err + } + if u.Featured, err = lysand.ParseURL(dbUser.Featured); err != nil { + return nil, err + } + if u.Followers, err = lysand.ParseURL(dbUser.Followers); err != nil { + return nil, err + } + if u.Following, err = lysand.ParseURL(dbUser.Following); err != nil { + return nil, err + } + + u.LysandAvatar = lysandAvatar(dbUser) + u.LysandBiography = lysandBiography(dbUser) + u.Signer = lysand.Signer{ + PrivateKey: dbUser.PrivateKey, + UserURL: u.URI.ToStd(), + } + + return u, nil +} + +func (u User) ToLysand() *lysand.User { + return &lysand.User{ + Entity: lysand.Entity{ + ID: u.ID, + URI: u.URI, + CreatedAt: lysand.TimeFromStd(u.CreatedAt), + Extensions: u.Extensions, + }, + DisplayName: helpers.StringPtr(u.DisplayName), + Username: u.Username, + Avatar: u.LysandAvatar, + Header: imageMap(u.Edges.HeaderImage), + Indexable: 
u.Indexable, + PublicKey: lysand.PublicKey{ + Actor: utils.UserAPIURL(u.ID), + PublicKey: lysand.SPKIPublicKey(u.PublicKey), + }, + Bio: u.LysandBiography, + Fields: u.Fields, + + Inbox: u.Inbox, + Outbox: u.Outbox, + Featured: u.Featured, + Followers: u.Followers, + Following: u.Following, + + // TODO: Remove these, they got deprecated and moved into an extension + Likes: utils.UserLikesAPIURL(u.ID), + Dislikes: utils.UserDislikesAPIURL(u.ID), + } +} + +func lysandAvatar(u *ent.User) lysand.ImageContentTypeMap { + if avatar := imageMap(u.Edges.AvatarImage); avatar != nil { + return avatar + } + + return lysand.ImageContentTypeMap{ + "image/svg+xml": lysand.ImageContent{ + Content: utils.DefaultAvatarURL(u.ID), + }, + } +} + +func lysandBiography(u *ent.User) lysand.TextContentTypeMap { + if u.Biography == nil { + return nil + } + + // TODO: Render HTML + + return lysand.TextContentTypeMap{ + "text/html": lysand.TextContent{ + Content: *u.Biography, + }, + } +} + +func imageMap(i *ent.Image) lysand.ImageContentTypeMap { + if i == nil { + return nil + } + + u, err := url.Parse(i.URL) + if err != nil { + return nil + } + + return lysand.ImageContentTypeMap{ + i.MimeType: { + Content: (*lysand.URL)(u), + }, + } +} diff --git a/internal/handlers/follow_handler/handler.go b/internal/handlers/follow_handler/handler.go new file mode 100644 index 0000000..1f6ca9f --- /dev/null +++ b/internal/handlers/follow_handler/handler.go @@ -0,0 +1,33 @@ +package follow_handler + +import ( + "github.com/go-logr/logr" + "github.com/gofiber/fiber/v2" + "github.com/lysand-org/versia-go/config" + "github.com/lysand-org/versia-go/internal/service" + "github.com/lysand-org/versia-go/pkg/webfinger" +) + +type Handler struct { + followService service.FollowService + federationService service.FederationService + + hostMeta webfinger.HostMeta + + log logr.Logger +} + +func New(followService service.FollowService, federationService service.FederationService, log logr.Logger) *Handler { + return 
&Handler{ + followService: followService, + federationService: federationService, + + hostMeta: webfinger.NewHostMeta(config.C.PublicAddress), + + log: log.WithName("users"), + } +} + +func (i *Handler) Register(r fiber.Router) { + r.Get("/api/follows/:id", i.GetLysandFollow) +} diff --git a/internal/handlers/follow_handler/lysand_follow_get.go b/internal/handlers/follow_handler/lysand_follow_get.go new file mode 100644 index 0000000..d9b0547 --- /dev/null +++ b/internal/handlers/follow_handler/lysand_follow_get.go @@ -0,0 +1,28 @@ +package follow_handler + +import ( + "github.com/gofiber/fiber/v2" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/internal/api_schema" +) + +func (i *Handler) GetLysandFollow(c *fiber.Ctx) error { + parsedRequestedFollowID, err := uuid.Parse(c.Params("id")) + if err != nil { + return api_schema.ErrBadRequest(map[string]any{"reason": "Invalid follow ID"}) + } + + f, err := i.followService.GetFollow(c.UserContext(), parsedRequestedFollowID) + if err != nil { + i.log.Error(err, "Failed to query follow", "id", parsedRequestedFollowID) + + return api_schema.ErrInternalServerError(map[string]any{"reason": "Failed to query follow"}) + } + if f == nil { + return c.Status(fiber.StatusNotFound).JSON(fiber.Map{ + "error": "Follow not found", + }) + } + + return c.JSON(f.ToLysand()) +} diff --git a/internal/handlers/meta_handler/handler.go b/internal/handlers/meta_handler/handler.go new file mode 100644 index 0000000..b27b415 --- /dev/null +++ b/internal/handlers/meta_handler/handler.go @@ -0,0 +1,28 @@ +package meta_handler + +import ( + "github.com/go-logr/logr" + "github.com/gofiber/fiber/v2" + "github.com/lysand-org/versia-go/config" + "github.com/lysand-org/versia-go/pkg/webfinger" +) + +type Handler struct { + hostMeta webfinger.HostMeta + + log logr.Logger +} + +func New(log logr.Logger) *Handler { + return &Handler{ + hostMeta: webfinger.NewHostMeta(config.C.PublicAddress), + + log: log.WithName("users"), + } +} + +func (i 
*Handler) Register(r fiber.Router) { + r.Get("/.well-known/lysand", i.GetLysandServerMetadata) + r.Get("/.well-known/host-meta", i.GetHostMeta) + r.Get("/.well-known/host-meta.json", i.GetHostMetaJSON) +} diff --git a/internal/handlers/meta_handler/lysand_server_metadata_get.go b/internal/handlers/meta_handler/lysand_server_metadata_get.go new file mode 100644 index 0000000..e2400fa --- /dev/null +++ b/internal/handlers/meta_handler/lysand_server_metadata_get.go @@ -0,0 +1,28 @@ +package meta_handler + +import ( + "github.com/Masterminds/semver" + "github.com/gofiber/fiber/v2" + "github.com/lysand-org/versia-go/config" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +func (i *Handler) GetLysandServerMetadata(c *fiber.Ctx) error { + return c.JSON(lysand.ServerMetadata{ + // TODO: Get version from build linker flags + Version: semver.MustParse("0.0.0-dev"), + + Name: config.C.InstanceName, + Description: config.C.InstanceDescription, + Website: lysand.URLFromStd(config.C.PublicAddress), + + // TODO: Get more info + Moderators: nil, + Admins: nil, + Logo: nil, + Banner: nil, + + SupportedExtensions: []string{}, + Extensions: map[string]any{}, + }) +} diff --git a/internal/handlers/meta_handler/wellknown_host_meta.go b/internal/handlers/meta_handler/wellknown_host_meta.go new file mode 100644 index 0000000..b84df12 --- /dev/null +++ b/internal/handlers/meta_handler/wellknown_host_meta.go @@ -0,0 +1,23 @@ +package meta_handler + +import ( + "github.com/gofiber/fiber/v2" +) + +func (i *Handler) GetHostMeta(c *fiber.Ctx) error { + if c.Accepts(fiber.MIMEApplicationJSON) != "" { + return i.GetHostMetaJSON(c) + } + + if c.Accepts(fiber.MIMEApplicationXML) != "" { + c.Set(fiber.HeaderContentType, fiber.MIMEApplicationXMLCharsetUTF8) + return c.Send(i.hostMeta.XML) + } + + return c.Status(fiber.StatusNotAcceptable).SendString("Not Acceptable") +} + +func (i *Handler) GetHostMetaJSON(c *fiber.Ctx) error { + c.Set(fiber.HeaderContentType, 
fiber.MIMEApplicationJSONCharsetUTF8) + return c.Send(i.hostMeta.JSON) +} diff --git a/internal/handlers/note_handler/app_note_create.go b/internal/handlers/note_handler/app_note_create.go new file mode 100644 index 0000000..41fbe47 --- /dev/null +++ b/internal/handlers/note_handler/app_note_create.go @@ -0,0 +1,32 @@ +package note_handler + +import ( + "github.com/gofiber/fiber/v2" + "github.com/lysand-org/versia-go/internal/api_schema" +) + +func (i *Handler) CreateNote(c *fiber.Ctx) error { + req := api_schema.CreateNoteRequest{} + if err := c.BodyParser(&req); err != nil { + return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{ + "error": "invalid request", + }) + } + if err := i.bodyValidator.Validate(req); err != nil { + return err + } + + n, err := i.noteService.CreateNote(c.UserContext(), req) + if err != nil { + return err + } + if n == nil { + return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{ + "error": "failed to create note", + }) + } + + return c.Status(fiber.StatusCreated).JSON(api_schema.Note{ + ID: n.ID, + }) +} diff --git a/internal/handlers/note_handler/app_note_get.go b/internal/handlers/note_handler/app_note_get.go new file mode 100644 index 0000000..66e964a --- /dev/null +++ b/internal/handlers/note_handler/app_note_get.go @@ -0,0 +1,28 @@ +package note_handler + +import ( + "github.com/gofiber/fiber/v2" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/internal/api_schema" +) + +func (i *Handler) GetNote(c *fiber.Ctx) error { + parsedRequestedNoteID, err := uuid.Parse(c.Params("id")) + if err != nil { + return api_schema.ErrBadRequest(map[string]any{ + "reason": "Invalid note ID", + }) + } + + u, err := i.noteService.GetNote(c.UserContext(), parsedRequestedNoteID) + if err != nil { + i.log.Error(err, "Failed to query note", "id", parsedRequestedNoteID) + + return api_schema.ErrInternalServerError(map[string]any{"reason": "Failed to query note"}) + } + if u == nil { + return api_schema.ErrNotFound(nil) + } + + 
return c.JSON(u.ToLysand()) +} diff --git a/internal/handlers/note_handler/handler.go b/internal/handlers/note_handler/handler.go new file mode 100644 index 0000000..a8d59cf --- /dev/null +++ b/internal/handlers/note_handler/handler.go @@ -0,0 +1,35 @@ +package note_handler + +import ( + "github.com/go-logr/logr" + "github.com/gofiber/fiber/v2" + "github.com/lysand-org/versia-go/config" + "github.com/lysand-org/versia-go/internal/service" + "github.com/lysand-org/versia-go/internal/validators" + "github.com/lysand-org/versia-go/pkg/webfinger" +) + +type Handler struct { + noteService service.NoteService + bodyValidator validators.BodyValidator + + hostMeta webfinger.HostMeta + + log logr.Logger +} + +func New(noteService service.NoteService, bodyValidator validators.BodyValidator, log logr.Logger) *Handler { + return &Handler{ + noteService: noteService, + bodyValidator: bodyValidator, + + hostMeta: webfinger.NewHostMeta(config.C.PublicAddress), + + log: log.WithName("users"), + } +} + +func (i *Handler) Register(r fiber.Router) { + r.Get("/api/app/notes/:id", i.GetNote) + r.Post("/api/app/notes/", i.CreateNote) +} diff --git a/internal/handlers/user_handler/app_user_create.go b/internal/handlers/user_handler/app_user_create.go new file mode 100644 index 0000000..c70a51d --- /dev/null +++ b/internal/handlers/user_handler/app_user_create.go @@ -0,0 +1,35 @@ +package user_handler + +import ( + "github.com/gofiber/fiber/v2" + "github.com/lysand-org/versia-go/ent" + "github.com/lysand-org/versia-go/internal/api_schema" +) + +func (i *Handler) CreateUser(c *fiber.Ctx) error { + var req api_schema.CreateUserRequest + if err := c.BodyParser(&req); err != nil { + return api_schema.ErrInvalidRequestBody(nil) + } + + if err := i.bodyValidator.Validate(req); err != nil { + return err + } + + u, err := i.userService.NewUser(c.UserContext(), req.Username, req.Password) + if err != nil { + // TODO: Wrap this in a custom error + if ent.IsConstraintError(err) { + return 
api_schema.ErrUsernameTaken(nil) + } + + i.log.Error(err, "Failed to create user", "username", req.Username) + + return api_schema.ErrInternalServerError(nil) + } + + return c.JSON(api_schema.User{ + ID: u.ID, + Username: u.Username, + }) +} diff --git a/internal/handlers/user_handler/app_user_get.go b/internal/handlers/user_handler/app_user_get.go new file mode 100644 index 0000000..a0bf665 --- /dev/null +++ b/internal/handlers/user_handler/app_user_get.go @@ -0,0 +1,31 @@ +package user_handler + +import ( + "github.com/gofiber/fiber/v2" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/internal/api_schema" +) + +func (i *Handler) GetUser(c *fiber.Ctx) error { + parsedRequestedUserID, err := uuid.Parse(c.Params("id")) + if err != nil { + return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{ + "error": "Invalid user ID", + }) + } + + u, err := i.userService.GetUserByID(c.UserContext(), parsedRequestedUserID) + if err != nil { + i.log.Error(err, "Failed to query user", "id", parsedRequestedUserID) + + return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{ + "error": "Failed to query user", + }) + + } + if u == nil { + return api_schema.ErrNotFound(map[string]any{"id": parsedRequestedUserID}) + } + + return c.JSON(u) +} diff --git a/internal/handlers/user_handler/handler.go b/internal/handlers/user_handler/handler.go new file mode 100644 index 0000000..c7177e7 --- /dev/null +++ b/internal/handlers/user_handler/handler.go @@ -0,0 +1,46 @@ +package user_handler + +import ( + "github.com/go-logr/logr" + "github.com/gofiber/fiber/v2" + "github.com/lysand-org/versia-go/internal/service" + "github.com/lysand-org/versia-go/internal/validators" +) + +type Handler struct { + userService service.UserService + federationService service.FederationService + inboxService service.InboxService + + bodyValidator validators.BodyValidator + requestValidator validators.RequestValidator + + log logr.Logger +} + +func New(userService service.UserService, 
federationService service.FederationService, inboxService service.InboxService, bodyValidator validators.BodyValidator, requestValidator validators.RequestValidator, log logr.Logger) *Handler { + return &Handler{ + userService: userService, + federationService: federationService, + inboxService: inboxService, + + bodyValidator: bodyValidator, + requestValidator: requestValidator, + + log: log, + } +} + +func (i *Handler) Register(r fiber.Router) { + // TODO: Handle this differently + // There might be other routes that might want to also add their stuff to the robots.txt + r.Get("/robots.txt", i.RobotsTXT) + + r.Get("/.well-known/webfinger", i.Webfinger) + + r.Get("/api/app/users/:id", i.GetUser) + r.Post("/api/app/users/", i.CreateUser) + + r.Get("/api/users/:id", i.GetLysandUser) + r.Post("/api/users/:id/inbox", i.LysandInbox) +} diff --git a/internal/handlers/user_handler/lysand_inbox.go b/internal/handlers/user_handler/lysand_inbox.go new file mode 100644 index 0000000..932ca89 --- /dev/null +++ b/internal/handlers/user_handler/lysand_inbox.go @@ -0,0 +1,57 @@ +package user_handler + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/lysand-org/versia-go/internal/validators/val_impls" + + "github.com/gofiber/fiber/v2" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/internal/api_schema" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +func (i *Handler) LysandInbox(c *fiber.Ctx) error { + if err := i.requestValidator.ValidateFiberCtx(c.UserContext(), c); err != nil { + if errors.Is(err, val_impls.ErrInvalidSignature) { + i.log.Error(err, "Invalid signature") + return c.SendStatus(fiber.StatusUnauthorized) + } + + i.log.Error(err, "Failed to validate signature") + return err + } + + var raw json.RawMessage + if err := json.Unmarshal(c.Body(), &raw); err != nil { + i.log.Error(err, "Failed to unmarshal inbox object") + return api_schema.ErrBadRequest(nil) + } + + obj, err := lysand.ParseInboxObject(raw) + if err != nil { + 
i.log.Error(err, "Failed to parse inbox object") + + if errors.Is(err, lysand.ErrUnknownType{}) { + return api_schema.ErrNotFound(map[string]any{ + "error": fmt.Sprintf("Unknown object type: %s", err.(lysand.ErrUnknownType).Type), + }) + } + + return err + } + + userId, err := uuid.Parse(c.Params("id")) + if err != nil { + return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{ + "error": "Invalid user ID", + }) + } + + if err := i.inboxService.Handle(c.UserContext(), obj, userId); err != nil { + i.log.Error(err, "Failed to handle inbox", "user", userId) + } + + return c.SendStatus(fiber.StatusOK) +} diff --git a/internal/handlers/user_handler/lysand_user_get.go b/internal/handlers/user_handler/lysand_user_get.go new file mode 100644 index 0000000..842f690 --- /dev/null +++ b/internal/handlers/user_handler/lysand_user_get.go @@ -0,0 +1,31 @@ +package user_handler + +import ( + "github.com/gofiber/fiber/v2" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/internal/api_schema" +) + +func (i *Handler) GetLysandUser(c *fiber.Ctx) error { + parsedRequestedUserID, err := uuid.Parse(c.Params("id")) + if err != nil { + return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{ + "error": "Invalid user ID", + }) + } + + u, err := i.userService.GetUserByID(c.UserContext(), parsedRequestedUserID) + if err != nil { + i.log.Error(err, "Failed to query user", "id", parsedRequestedUserID) + + return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{ + "error": "Failed to query user", + "id": parsedRequestedUserID, + }) + } + if u == nil { + return api_schema.ErrNotFound(map[string]any{"id": parsedRequestedUserID}) + } + + return c.JSON(u.ToLysand()) +} diff --git a/internal/handlers/user_handler/robots_txt.go b/internal/handlers/user_handler/robots_txt.go new file mode 100644 index 0000000..d84178a --- /dev/null +++ b/internal/handlers/user_handler/robots_txt.go @@ -0,0 +1,9 @@ +package user_handler + +import ( + "github.com/gofiber/fiber/v2" +) + +func (i 
*Handler) RobotsTXT(c *fiber.Ctx) error { + return c.SendString("") +} diff --git a/internal/handlers/user_handler/wellknown_webfinger.go b/internal/handlers/user_handler/wellknown_webfinger.go new file mode 100644 index 0000000..8ff7dac --- /dev/null +++ b/internal/handlers/user_handler/wellknown_webfinger.go @@ -0,0 +1,32 @@ +package user_handler + +import ( + "github.com/gofiber/fiber/v2" + "github.com/lysand-org/versia-go/config" + "github.com/lysand-org/versia-go/internal/helpers" + "github.com/lysand-org/versia-go/pkg/webfinger" +) + +func (i *Handler) Webfinger(c *fiber.Ctx) error { + userID, err := webfinger.ParseResource(c.Query("resource")) + if err != nil { + return c.Status(fiber.StatusBadRequest).JSON(webfinger.Response{ + Error: helpers.StringPtr(err.Error()), + }) + } + + if userID.Domain != config.C.PublicAddress.Host { + return c.Status(fiber.StatusNotFound).JSON(webfinger.Response{ + Error: helpers.StringPtr("The requested user is a remote user"), + }) + } + + wf, err := i.userService.GetWebfingerForUser(c.UserContext(), userID.ID) + if err != nil { + return c.Status(fiber.StatusInternalServerError).JSON(webfinger.Response{ + Error: helpers.StringPtr("Failed to query user"), + }) + } + + return c.JSON(wf.WebFingerResource()) +} diff --git a/internal/helpers/crypto.go b/internal/helpers/crypto.go new file mode 100644 index 0000000..8cdb8ce --- /dev/null +++ b/internal/helpers/crypto.go @@ -0,0 +1,22 @@ +package helpers + +import ( + "crypto/sha256" + "time" +) + +func HashSHA256(data []byte) []byte { + h := sha256.New() + + h.Write(data) + + return h.Sum(nil) +} + +func ISO8601(t time.Time) string { + return t.Format("2006-01-02T15:04:05Z") +} + +func ParseISO8601(s string) (time.Time, error) { + return time.Parse("2006-01-02T15:04:05Z", s) +} diff --git a/internal/helpers/ptr.go b/internal/helpers/ptr.go new file mode 100644 index 0000000..b9af8d9 --- /dev/null +++ b/internal/helpers/ptr.go @@ -0,0 +1,5 @@ +package helpers + +func StringPtr(s 
// ErrFollowAlreadyExists is returned by Follow when the upsert is
// rejected by a constraint on the existing (follower, followee) pair.
var ErrFollowAlreadyExists = errors.New("follow already exists")

// Compile-time check that FollowRepositoryImpl satisfies the interface.
var _ repository.FollowRepository = (*FollowRepositoryImpl)(nil)

// FollowRepositoryImpl is the ent-backed implementation of
// repository.FollowRepository.
type FollowRepositoryImpl struct {
	db        *ent.Client       // ent ORM client used for all follow queries
	log       logr.Logger
	telemetry *unitel.Telemetry // span factory for per-call tracing
}

// NewFollowRepositoryImpl wires a FollowRepositoryImpl; its signature
// matches the repo_impls.Factory shape consumed by ManagerImpl.
func NewFollowRepositoryImpl(db *ent.Client, log logr.Logger, telemetry *unitel.Telemetry) repository.FollowRepository {
	return &FollowRepositoryImpl{
		db:        db,
		log:       log,
		telemetry: telemetry,
	}
}
+ AddAttribute("followURI", f.URI) + + return entity.NewFollow(f) +} + +func (i FollowRepositoryImpl) Follow(ctx context.Context, follower, followee *entity.User) (*entity.Follow, error) { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.FollowRepositoryImpl.Follow"). + AddAttribute("follower", follower.URI). + AddAttribute("followee", followee.URI) + defer s.End() + ctx = s.Context() + + fid := uuid.New() + + fid, err := i.db.Follow.Create(). + SetID(fid). + SetIsRemote(false). + SetURI(utils.UserAPIURL(fid).String()). + SetStatus(follow.StatusPending). + SetFollower(follower.User). + SetFollowee(followee.User). + OnConflictColumns(follow.FollowerColumn, follow.FolloweeColumn). + UpdateStatus(). + ID(ctx) + if err != nil { + if !ent.IsConstraintError(err) { + return nil, err + } + + return nil, ErrFollowAlreadyExists + } + + s.AddAttribute("followID", fid) + + f, err := i.db.Follow.Query(). + Where(follow.ID(fid)). + WithFollowee(). + WithFollower(). + Only(ctx) + if err != nil { + return nil, err + } + + s.AddAttribute("followURI", f.URI) + + return entity.NewFollow(f) +} + +func (i FollowRepositoryImpl) Unfollow(ctx context.Context, follower, followee *entity.User) error { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.FollowRepositoryImpl.Unfollow"). + AddAttribute("follower", follower.URI). + AddAttribute("followee", followee.URI) + defer s.End() + ctx = s.Context() + + n, err := i.db.Follow.Delete(). + Where(matchFollowUsers(follower, followee)). + Exec(ctx) + if err != nil { + s.CaptureError(err) + } else { + s.AddAttribute("deleted", n). + CaptureMessage(fmt.Sprintf("Deleted %d follow(s)", n)) + } + + return nil +} + +func (i FollowRepositoryImpl) AcceptFollow(ctx context.Context, follower, followee *entity.User) error { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.FollowRepositoryImpl.AcceptFollow"). + AddAttribute("follower", follower.URI). 
+ AddAttribute("followee", followee.URI) + defer s.End() + ctx = s.Context() + + n, err := i.db.Follow.Update(). + Where(matchFollowUsers(follower, followee), follow.StatusEQ(follow.StatusPending)). + SetStatus(follow.StatusAccepted). + Save(ctx) + if err != nil { + s.CaptureError(err) + } else { + s.CaptureMessage(fmt.Sprintf("Accepted %d follow(s)", n)) + } + + return err +} + +func (i FollowRepositoryImpl) RejectFollow(ctx context.Context, follower, followee *entity.User) error { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.FollowRepositoryImpl.RejectFollow"). + AddAttribute("follower", follower.URI). + AddAttribute("followee", followee.URI) + defer s.End() + ctx = s.Context() + + n, err := i.db.Follow.Delete(). + Where(follow.And(matchFollowUsers(follower, followee), follow.StatusEQ(follow.StatusPending))). + Exec(ctx) + if err != nil { + s.CaptureError(err) + } else { + s.CaptureMessage(fmt.Sprintf("Deleted %d follow(s)", n)) + } + + return err +} + +func matchFollowUsers(follower, followee *entity.User) predicate.Follow { + return follow.And( + follow.HasFollowerWith( + user.ID(follower.ID), user.ID(followee.ID), + ), + follow.HasFolloweeWith( + user.ID(follower.ID), user.ID(followee.ID), + ), + ) +} diff --git a/internal/repository/repo_impls/manager.go b/internal/repository/repo_impls/manager.go new file mode 100644 index 0000000..327c084 --- /dev/null +++ b/internal/repository/repo_impls/manager.go @@ -0,0 +1,90 @@ +package repo_impls + +import ( + "context" + "github.com/lysand-org/versia-go/internal/repository" + + "git.devminer.xyz/devminer/unitel" + "github.com/go-logr/logr" + "github.com/lysand-org/versia-go/ent" + "github.com/lysand-org/versia-go/internal/database" +) + +type Factory[T any] func(db *ent.Client, log logr.Logger, telemetry *unitel.Telemetry) T + +var _ repository.Manager = (*ManagerImpl)(nil) + +type ManagerImpl struct { + users repository.UserRepository + notes repository.NoteRepository + follows 
// NewManagerImpl builds a repository manager, eagerly instantiating one
// repository of each kind. The factory functions are retained so the
// manager can be re-instantiated against a transactional ent client in
// Atomic (see withDB).
func NewManagerImpl(db *ent.Client, telemetry *unitel.Telemetry, log logr.Logger, userRepositoryFunc Factory[repository.UserRepository], noteRepositoryFunc Factory[repository.NoteRepository], followRepositoryFunc Factory[repository.FollowRepository]) *ManagerImpl {
	userRepository := userRepositoryFunc(db, log.WithName("users"), telemetry)
	noteRepository := noteRepositoryFunc(db, log.WithName("notes"), telemetry)
	followRepository := followRepositoryFunc(db, log.WithName("follows"), telemetry)

	return &ManagerImpl{
		users:   userRepository,
		notes:   noteRepository,
		follows: followRepository,

		uRFactory: userRepositoryFunc,
		nRFactory: noteRepositoryFunc,
		fRFactory: followRepositoryFunc,

		db:        db,
		log:       log,
		telemetry: telemetry,
	}
}

// withDB clones the manager (and all repositories) over a different ent
// client — used to scope repositories to a transaction.
func (i *ManagerImpl) withDB(db *ent.Client) *ManagerImpl {
	return NewManagerImpl(db, i.telemetry, i.log, i.uRFactory, i.nRFactory, i.fRFactory)
}

// Atomic runs fn inside a database transaction. fn receives a manager
// whose repositories operate on the transactional client; the
// transaction is marked for commit only if fn returns nil.
func (i *ManagerImpl) Atomic(ctx context.Context, fn func(ctx context.Context, tx repository.Manager) error) error {
	s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.ManagerImpl.Atomic")
	defer s.End()
	ctx = s.Context()

	tx, err := database.BeginTx(ctx, i.db, i.telemetry)
	if err != nil {
		return err
	}
	// NOTE(review): Finish is invoked both here (deferred) and again
	// explicitly after MarkForCommit below — this only works if Finish
	// is idempotent; confirm against database.Tx.
	defer func(tx *database.Tx) {
		err := tx.Finish()
		if err != nil {
			i.log.Error(err, "Failed to finish transaction")
		}
	}(tx)

	if err := fn(ctx, i.withDB(tx.Client())); err != nil {
		// fn failed: return without MarkForCommit, so Finish (deferred)
		// resolves the transaction without committing.
		return err
	}

	tx.MarkForCommit()

	return tx.Finish()
}

// Users returns the user repository bound to this manager's client.
func (i *ManagerImpl) Users() repository.UserRepository {
	return i.users
}

// Notes returns the note repository bound to this manager's client.
func (i *ManagerImpl) Notes() repository.NoteRepository {
	return i.notes
}
// Compile-time check that NoteRepositoryImpl satisfies the interface.
var _ repository.NoteRepository = (*NoteRepositoryImpl)(nil)

// NoteRepositoryImpl is the ent-backed implementation of
// repository.NoteRepository.
type NoteRepositoryImpl struct {
	db        *ent.Client       // ent ORM client used for all note queries
	log       logr.Logger
	telemetry *unitel.Telemetry // span factory for per-call tracing
}

// NewNoteRepositoryImpl wires a NoteRepositoryImpl; its signature
// matches the repo_impls.Factory shape consumed by ManagerImpl.
func NewNoteRepositoryImpl(db *ent.Client, log logr.Logger, telemetry *unitel.Telemetry) repository.NoteRepository {
	return &NoteRepositoryImpl{
		db:        db,
		log:       log,
		telemetry: telemetry,
	}
}
+ Only(ctx) + if err != nil { + i.log.Error(err, "Failed to query author", "id", nid) + return nil, err + } + + return entity.NewNote(n) +} + +func (i *NoteRepositoryImpl) ImportLysandNote(ctx context.Context, lNote *lysand.Note) (*entity.Note, error) { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.NoteRepositoryImpl.ImportLysandNote") + defer s.End() + ctx = s.Context() + + id, err := i.db.Note.Create(). + SetID(uuid.New()). + SetIsRemote(true). + SetURI(lNote.URI.String()). + OnConflict(). + UpdateNewValues(). + ID(ctx) + if err != nil { + i.log.Error(err, "Failed to import note into database", "uri", lNote.URI) + return nil, err + } + + n, err := i.db.Note.Get(ctx, id) + if err != nil { + i.log.Error(err, "Failed to get imported note", "id", id, "uri", lNote.URI) + return nil, err + } + + i.log.V(2).Info("Imported note into database", "id", id, "uri", lNote.URI) + + return entity.NewNote(n) +} + +func (i *NoteRepositoryImpl) GetByID(ctx context.Context, id uuid.UUID) (*entity.Note, error) { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.NoteRepositoryImpl.LookupByIDOrUsername") + defer s.End() + ctx = s.Context() + + n, err := i.db.Note.Query(). + Where(note.ID(id)). + WithAuthor(). + WithMentions(). 
+ Only(ctx) + if err != nil { + if !ent.IsNotFound(err) { + i.log.Error(err, "Failed to query user", "id", id) + return nil, err + } + + i.log.V(2).Info("User not found in DB", "id", id) + + return nil, nil + } + + i.log.V(2).Info("User found in DB", "id", id) + + return entity.NewNote(n) +} diff --git a/internal/repository/repo_impls/user_repository_impl.go b/internal/repository/repo_impls/user_repository_impl.go new file mode 100644 index 0000000..549327b --- /dev/null +++ b/internal/repository/repo_impls/user_repository_impl.go @@ -0,0 +1,325 @@ +package repo_impls + +import ( + "context" + "crypto/ed25519" + "errors" + "github.com/lysand-org/versia-go/internal/repository" + "github.com/lysand-org/versia-go/internal/service" + "golang.org/x/crypto/bcrypt" + + "git.devminer.xyz/devminer/unitel" + "github.com/go-logr/logr" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/ent" + "github.com/lysand-org/versia-go/ent/predicate" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/internal/entity" + "github.com/lysand-org/versia-go/internal/utils" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +const bcryptCost = 12 + +var ( + ErrUsernameTaken = errors.New("username taken") + + _ repository.UserRepository = (*UserRepositoryImpl)(nil) +) + +type UserRepositoryImpl struct { + federationService service.FederationService + + db *ent.Client + log logr.Logger + telemetry *unitel.Telemetry +} + +func NewUserRepositoryImpl(federationService service.FederationService, db *ent.Client, log logr.Logger, telemetry *unitel.Telemetry) repository.UserRepository { + return &UserRepositoryImpl{ + federationService: federationService, + db: db, + log: log, + telemetry: telemetry, + } +} + +func (i *UserRepositoryImpl) NewUser(ctx context.Context, username, password string, priv ed25519.PrivateKey, pub ed25519.PublicKey) (*entity.User, error) { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.UserRepositoryImpl.NewUser") + 
defer s.End() + ctx = s.Context() + + pwHash, err := bcrypt.GenerateFromPassword([]byte(password), bcryptCost) + if err != nil { + return nil, err + } + + uid := uuid.New() + + u, err := i.db.User.Create(). + SetID(uid). + SetIsRemote(false). + SetURI(utils.UserAPIURL(uid).String()). + SetUsername(username). + SetPasswordHash(pwHash). + SetPublicKey(pub). + SetPrivateKey(priv). + SetInbox(utils.UserInboxAPIURL(uid).String()). + SetOutbox(utils.UserOutboxAPIURL(uid).String()). + SetFeatured(utils.UserFeaturedAPIURL(uid).String()). + SetFollowers(utils.UserFollowersAPIURL(uid).String()). + SetFollowing(utils.UserFollowingAPIURL(uid).String()). + Save(ctx) + if err != nil { + if ent.IsConstraintError(err) { + return nil, ErrUsernameTaken + } + + return nil, err + } + + return entity.NewUser(u) +} + +func (i *UserRepositoryImpl) ImportLysandUserByURI(ctx context.Context, uri *lysand.URL) (*entity.User, error) { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.UserRepositoryImpl.ImportLysandUserByURI") + defer s.End() + ctx = s.Context() + + lUser, err := i.federationService.GetUser(ctx, uri) + if err != nil { + i.log.Error(err, "Failed to fetch remote user", "uri", uri) + return nil, err + } + + id, err := i.db.User.Create(). + SetID(uuid.New()). + SetIsRemote(true). + SetURI(lUser.URI.String()). + SetUsername(lUser.Username). + SetNillableDisplayName(lUser.DisplayName). + SetBiography(lUser.Bio.String()). + SetPublicKey(lUser.PublicKey.PublicKey.ToStd()). + SetIndexable(lUser.Indexable). + SetFields(lUser.Fields). + SetExtensions(lUser.Extensions). + SetInbox(lUser.Inbox.String()). + SetOutbox(lUser.Outbox.String()). + SetFeatured(lUser.Featured.String()). + SetFollowers(lUser.Followers.String()). + SetFollowing(lUser.Following.String()). + OnConflict(). + UpdateNewValues(). 
+ ID(ctx) + if err != nil { + i.log.Error(err, "Failed to import user into database", "uri", lUser.URI) + return nil, err + } + + u, err := i.db.User.Get(ctx, id) + if err != nil { + i.log.Error(err, "Failed to get imported user", "id", id, "uri", lUser.URI) + return nil, err + } + + i.log.V(2).Info("Imported user into database", "id", id, "uri", lUser.URI) + + return entity.NewUser(u) +} + +func (i *UserRepositoryImpl) Resolve(ctx context.Context, uri *lysand.URL) (*entity.User, error) { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.UserRepositoryImpl.Resolve") + defer s.End() + ctx = s.Context() + + u, err := i.LookupByURI(ctx, uri) + if err != nil { + return nil, err + } + + // check if the user is already imported + if u == nil { + i.log.V(2).Info("User not found in DB", "uri", uri) + + u, err := i.ImportLysandUserByURI(ctx, uri) + if err != nil { + i.log.Error(err, "Failed to import user", "uri", uri) + return nil, err + } + + return u, nil + } + + i.log.V(2).Info("User found in DB", "uri", uri) + + return u, nil +} + +func (i *UserRepositoryImpl) ResolveMultiple(ctx context.Context, uris []lysand.URL) ([]*entity.User, error) { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.UserRepositoryImpl.ResolveMultiple") + defer s.End() + ctx = s.Context() + + us, err := i.LookupByURIs(ctx, uris) + if err != nil { + return nil, err + } + + // TODO: Refactor to use async imports using a work queue +outer: + for _, uri := range uris { + // check if the user is already imported + for _, u := range us { + if uri.String() == u.URI.String() { + i.log.V(2).Info("User found in DB", "uri", uri) + + continue outer + } + } + + i.log.V(2).Info("User not found in DB", "uri", uri) + + importedUser, err := i.ImportLysandUserByURI(ctx, &uri) + if err != nil { + i.log.Error(err, "Failed to import user", "uri", uri) + + continue + } + + i.log.V(2).Info("Imported user", "uri", uri) + + us = append(us, importedUser) + } + + return us, nil +} + 
+func (i *UserRepositoryImpl) GetByID(ctx context.Context, uid uuid.UUID) (*entity.User, error) { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.UserRepositoryImpl.GetByID") + defer s.End() + ctx = s.Context() + + u, err := i.db.User.Query(). + Where(user.IDEQ(uid)). + WithAvatarImage(). + WithHeaderImage(). + Only(ctx) + if err != nil { + if !ent.IsNotFound(err) { + i.log.Error(err, "Failed to query user", "id", uid) + return nil, err + } + + i.log.V(2).Info("User not found in DB", "id", uid) + + return nil, nil + } + + i.log.V(2).Info("User found in DB", "id", uid) + + return entity.NewUser(u) +} + +func (i *UserRepositoryImpl) GetLocalByID(ctx context.Context, uid uuid.UUID) (*entity.User, error) { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.UserRepositoryImpl.GetLocalByID") + defer s.End() + ctx = s.Context() + + u, err := i.db.User.Query(). + Where(user.And(user.ID(uid), user.IsRemote(false))). + WithAvatarImage(). + WithHeaderImage(). + Only(ctx) + if err != nil { + if !ent.IsNotFound(err) { + i.log.Error(err, "Failed to query local user", "id", uid) + return nil, err + } + + i.log.V(2).Info("Local user not found in DB", "id", uid) + + return nil, nil + } + + i.log.V(2).Info("Local user found in DB", "id", uid) + + return entity.NewUser(u) +} + +func (i *UserRepositoryImpl) LookupByURI(ctx context.Context, uri *lysand.URL) (*entity.User, error) { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.UserRepositoryImpl.LookupByURI") + defer s.End() + ctx = s.Context() + + // check if the user is already imported + u, err := i.db.User.Query(). + Where(user.URI(uri.String())). 
+ Only(ctx) + if err != nil { + if !ent.IsNotFound(err) { + i.log.Error(err, "Failed to query user", "uri", uri) + return nil, err + } + + i.log.V(2).Info("User not found in DB", "uri", uri) + + return nil, nil + } + + i.log.V(2).Info("User found in DB", "uri", uri) + + return entity.NewUser(u) +} + +func (i *UserRepositoryImpl) LookupByURIs(ctx context.Context, uris []lysand.URL) ([]*entity.User, error) { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.UserRepositoryImpl.LookupByURIs") + defer s.End() + ctx = s.Context() + + urisStrs := make([]string, 0, len(uris)) + for _, u := range uris { + urisStrs = append(urisStrs, u.String()) + } + + us, err := i.db.User.Query(). + Where(user.URIIn(urisStrs...)). + All(ctx) + if err != nil { + return nil, err + } + + return utils.MapErrorSlice(us, entity.NewUser) +} + +func (i *UserRepositoryImpl) LookupByIDOrUsername(ctx context.Context, idOrUsername string) (*entity.User, error) { + s := i.telemetry.StartSpan(ctx, "function", "repository/repo_impls.UserRepositoryImpl.LookupByIDOrUsername") + defer s.End() + ctx = s.Context() + + var preds []predicate.User + if u, err := uuid.Parse(idOrUsername); err == nil { + preds = append(preds, user.IDEQ(u)) + } else { + preds = append(preds, user.UsernameEQ(idOrUsername)) + } + + u, err := i.db.User.Query(). + Where(preds...). + WithAvatarImage(). + WithHeaderImage(). 
// UserRepository persists and resolves local and remote (federated)
// users.
type UserRepository interface {
	// NewUser creates a local user with a bcrypt-hashed password and
	// pre-computed federation URLs (inbox, outbox, followers, ...).
	NewUser(ctx context.Context, username, password string, privateKey ed25519.PrivateKey, publicKey ed25519.PublicKey) (*entity.User, error)
	// ImportLysandUserByURI fetches a remote user document and upserts
	// it into the local database.
	ImportLysandUserByURI(ctx context.Context, uri *lysand.URL) (*entity.User, error)

	GetByID(ctx context.Context, id uuid.UUID) (*entity.User, error)
	// GetLocalByID is GetByID restricted to non-remote users.
	GetLocalByID(ctx context.Context, id uuid.UUID) (*entity.User, error)

	// Resolve returns the user for uri, importing it when it is not yet
	// known locally. ResolveMultiple does the same for a batch.
	Resolve(ctx context.Context, uri *lysand.URL) (*entity.User, error)
	ResolveMultiple(ctx context.Context, uris []lysand.URL) ([]*entity.User, error)

	// Lookup* methods consult only the local database; they never
	// trigger federation fetches.
	LookupByURI(ctx context.Context, uri *lysand.URL) (*entity.User, error)
	LookupByURIs(ctx context.Context, uris []lysand.URL) ([]*entity.User, error)
	LookupByIDOrUsername(ctx context.Context, idOrUsername string) (*entity.User, error)
}

// FollowRepository persists follow relations between users.
type FollowRepository interface {
	GetByID(ctx context.Context, id uuid.UUID) (*entity.Follow, error)

	// Follow creates a pending follow from follower to followee.
	Follow(ctx context.Context, follower, followee *entity.User) (*entity.Follow, error)
	Unfollow(ctx context.Context, follower, followee *entity.User) error
	// AcceptFollow promotes a pending follow to accepted status.
	AcceptFollow(ctx context.Context, follower, followee *entity.User) error
	// RejectFollow removes a still-pending follow.
	RejectFollow(ctx context.Context, follower, followee *entity.User) error
}
// UserService is the application-level API for managing users.
type UserService interface {
	// WithRepositories returns a copy of the service bound to the given
	// repository manager (used to run inside a transaction).
	WithRepositories(repositories repository.Manager) UserService

	NewUser(ctx context.Context, username, password string) (*entity.User, error)

	GetUserByID(ctx context.Context, id uuid.UUID) (*entity.User, error)

	// GetWebfingerForUser resolves a webfinger document for a local
	// user; userID is the identifier parsed from the webfinger
	// "resource" query parameter.
	GetWebfingerForUser(ctx context.Context, userID string) (*webfinger.User, error)
}

// FederationService performs outbound federation requests.
type FederationService interface {
	// SendToInbox signs object as author and delivers it to target's
	// inbox, returning the raw response body.
	SendToInbox(ctx context.Context, author *entity.User, target *entity.User, object any) ([]byte, error)
	// GetUser fetches a remote user document by its canonical URI.
	GetUser(ctx context.Context, uri *lysand.URL) (*lysand.User, error)
}

// InboxService dispatches objects received on a local user's
// federation inbox.
type InboxService interface {
	Handle(ctx context.Context, obj any, userId uuid.UUID) error
}

// NoteService is the application-level API for notes.
type NoteService interface {
	CreateNote(ctx context.Context, req api_schema.CreateNoteRequest) (*entity.Note, error)
	GetNote(ctx context.Context, id uuid.UUID) (*entity.Note, error)

	// ImportLysandNote upserts a remote note document into the local
	// database.
	ImportLysandNote(ctx context.Context, lNote *lysand.Note) (*entity.Note, error)
}
// Compile-time check that InboxServiceImpl satisfies the interface.
var _ service.InboxService = (*InboxServiceImpl)(nil)

// InboxServiceImpl implements service.InboxService on top of the
// repository manager and the outbound federation service.
type InboxServiceImpl struct {
	repositories repository.Manager

	federationService service.FederationService

	telemetry *unitel.Telemetry // span factory for per-call tracing
	log       logr.Logger
}

// NewInboxService wires an InboxServiceImpl.
func NewInboxService(repositories repository.Manager, federationService service.FederationService, telemetry *unitel.Telemetry, log logr.Logger) *InboxServiceImpl {
	return &InboxServiceImpl{
		repositories:      repositories,
		federationService: federationService,
		telemetry:         telemetry,
		log:               log,
	}
}

// WithRepositories returns a copy of the service bound to the given
// repository manager — Handle uses this to run its body inside a
// transaction-scoped manager.
func (i InboxServiceImpl) WithRepositories(repositories repository.Manager) service.InboxService {
	return NewInboxService(repositories, i.federationService, i.telemetry, i.log)
}
return i.repositories.Atomic(ctx, func(ctx context.Context, tx repository.Manager) error { + i := i.WithRepositories(tx).(*InboxServiceImpl) + + u, err := i.repositories.Users().GetLocalByID(ctx, userId) + if err != nil { + i.log.Error(err, "Failed to get user", "id", userId) + + return api_schema.ErrInternalServerError(nil) + } + if u == nil { + return api_schema.ErrNotFound(map[string]any{ + "id": userId, + }) + } + + // TODO: Implement more types + switch o := obj.(type) { + case lysand.Note: + i.log.Info("Received note", "note", o) + if err := i.handleNote(ctx, o, u); err != nil { + i.log.Error(err, "Failed to handle note", "note", o) + return err + } + + case lysand.Patch: + i.log.Info("Received patch", "patch", o) + case lysand.Follow: + if err := i.handleFollow(ctx, o, u); err != nil { + i.log.Error(err, "Failed to handle follow", "follow", o) + return err + } + case lysand.Undo: + i.log.Info("Received undo", "undo", o) + default: + i.log.Info("Unimplemented object type", "object", obj) + return api_schema.ErrNotImplemented(nil) + } + + return nil + }) +} + +func (i InboxServiceImpl) handleFollow(ctx context.Context, o lysand.Follow, u *entity.User) error { + s := i.telemetry.StartSpan(ctx, "function", "service/svc_impls.InboxServiceImpl.handleFollow") + defer s.End() + ctx = s.Context() + + author, err := i.repositories.Users().Resolve(ctx, o.Author) + if err != nil { + i.log.Error(err, "Failed to resolve author", "author", o.Author) + return err + } + + f, err := i.repositories.Follows().Follow(ctx, author, u) + if err != nil { + // TODO: Handle constraint errors + if ent.IsConstraintError(err) { + i.log.Error(err, "Follow already exists", "user", user.ID, "author", author.ID) + return nil + } + + i.log.Error(err, "Failed to create follow", "user", user.ID, "author", author.ID) + return err + } + + switch u.PrivacyLevel { + case user.PrivacyLevelPublic: + if err := i.repositories.Follows().AcceptFollow(ctx, author, u); err != nil { + i.log.Error(err, 
"Failed to accept follow", "user", user.ID, "author", author.ID) + return err + } + + if _, err := i.federationService.SendToInbox(ctx, u, author, f.ToLysandAccept()); err != nil { + i.log.Error(err, "Failed to send follow accept to inbox", "user", user.ID, "author", author.ID) + return err + } + + case user.PrivacyLevelRestricted: + case user.PrivacyLevelPrivate: + } + + return nil +} + +func (i InboxServiceImpl) handleNote(ctx context.Context, o lysand.Note, u *entity.User) error { + s := i.telemetry.StartSpan(ctx, "function", "service/svc_impls.InboxServiceImpl.handleNote") + defer s.End() + ctx = s.Context() + + author, err := i.repositories.Users().Resolve(ctx, o.Author) + if err != nil { + i.log.Error(err, "Failed to resolve author", "author", o.Author) + return err + } + + // TODO: Implement + + _ = author + + return nil +} diff --git a/internal/service/svc_impls/federation_service_impl.go b/internal/service/svc_impls/federation_service_impl.go new file mode 100644 index 0000000..0eaac80 --- /dev/null +++ b/internal/service/svc_impls/federation_service_impl.go @@ -0,0 +1,56 @@ +package svc_impls + +import ( + "context" + "git.devminer.xyz/devminer/unitel" + "github.com/go-logr/logr" + "github.com/lysand-org/versia-go/internal/entity" + "github.com/lysand-org/versia-go/internal/service" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +var _ service.FederationService = (*FederationServiceImpl)(nil) + +type FederationServiceImpl struct { + federationClient *lysand.FederationClient + + telemetry *unitel.Telemetry + + log logr.Logger +} + +func NewFederationServiceImpl(federationClient *lysand.FederationClient, telemetry *unitel.Telemetry, log logr.Logger) *FederationServiceImpl { + return &FederationServiceImpl{ + federationClient: federationClient, + telemetry: telemetry, + log: log, + } +} + +func (i FederationServiceImpl) SendToInbox(ctx context.Context, author *entity.User, target *entity.User, object any) ([]byte, error) { + s := 
i.telemetry.StartSpan(ctx, "function", "service/svc_impls.FederationServiceImpl.SendToInbox") + defer s.End() + ctx = s.Context() + + response, err := i.federationClient.SendToInbox(ctx, author.Signer, target.ToLysand(), object) + if err != nil { + i.log.Error(err, "Failed to send to inbox", "author", author.ID, "target", target.ID) + return response, err + } + + return response, nil +} + +func (i FederationServiceImpl) GetUser(ctx context.Context, uri *lysand.URL) (*lysand.User, error) { + s := i.telemetry.StartSpan(ctx, "function", "service/svc_impls.FederationServiceImpl.GetUser") + defer s.End() + ctx = s.Context() + + u, err := i.federationClient.GetUser(ctx, uri.ToStd()) + if err != nil { + i.log.Error(err, "Failed to fetch remote user", "uri", uri) + return nil, err + } + + return u, nil +} diff --git a/internal/service/svc_impls/follow_service_impl.go b/internal/service/svc_impls/follow_service_impl.go new file mode 100644 index 0000000..a9ee8c9 --- /dev/null +++ b/internal/service/svc_impls/follow_service_impl.go @@ -0,0 +1,102 @@ +package svc_impls + +import ( + "context" + "github.com/lysand-org/versia-go/internal/repository" + "github.com/lysand-org/versia-go/internal/service" + + "git.devminer.xyz/devminer/unitel" + "github.com/go-logr/logr" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/internal/entity" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +var _ service.FollowService = (*FollowServiceImpl)(nil) + +type FollowServiceImpl struct { + federationService service.FederationService + + repositories repository.Manager + + telemetry *unitel.Telemetry + log logr.Logger +} + +func NewFollowServiceImpl(federationService service.FederationService, repositories repository.Manager, telemetry *unitel.Telemetry, log logr.Logger) *FollowServiceImpl { + return &FollowServiceImpl{ + federationService: federationService, + repositories: repositories, + telemetry: telemetry, + log: log, + } +} + +func (i FollowServiceImpl) NewFollow(ctx 
context.Context, follower, followee *entity.User) (*entity.Follow, error) {
+	s := i.telemetry.StartSpan(ctx, "function", "service/svc_impls.FollowServiceImpl.NewFollow").
+		AddAttribute("follower", follower.URI).
+		AddAttribute("followee", followee.URI)
+	defer s.End()
+	ctx = s.Context()
+
+	f, err := i.repositories.Follows().Follow(ctx, follower, followee)
+	if err != nil {
+		i.log.Error(err, "Failed to create follow", "follower", follower.ID, "followee", followee.ID)
+		return nil, err
+	}
+
+	s.AddAttribute("followID", f.ID).
+		AddAttribute("followURI", f.URI)
+
+	i.log.V(2).Info("Created follow", "follower", follower.ID, "followee", followee.ID)
+
+	return f, nil
+}
+
+func (i FollowServiceImpl) GetFollow(ctx context.Context, id uuid.UUID) (*entity.Follow, error) {
+	s := i.telemetry.StartSpan(ctx, "function", "service/svc_impls.FollowServiceImpl.GetFollow").
+		AddAttribute("followID", id)
+	defer s.End()
+	ctx = s.Context()
+
+	f, err := i.repositories.Follows().GetByID(ctx, id)
+	if err != nil {
+		return nil, err
+	} else if f != nil {
+		s.AddAttribute("followURI", f.URI)
+	}
+
+	return f, nil
+}
+
+func (i FollowServiceImpl) ImportLysandFollow(ctx context.Context, lFollow *lysand.Follow) (*entity.Follow, error) {
+	s := i.telemetry.StartSpan(ctx, "function", "service/svc_impls.FollowServiceImpl.ImportLysandFollow").
+ AddAttribute("uri", lFollow.URI.String()) + defer s.End() + ctx = s.Context() + + var f *entity.Follow + if err := i.repositories.Atomic(ctx, func(ctx context.Context, tx repository.Manager) error { + follower, err := i.repositories.Users().Resolve(ctx, lFollow.Author) + if err != nil { + return err + } + s.AddAttribute("follower", follower.URI) + + followee, err := i.repositories.Users().Resolve(ctx, lFollow.Followee) + if err != nil { + return err + } + s.AddAttribute("followee", followee.URI) + + f, err = i.repositories.Follows().Follow(ctx, follower, followee) + return err + }); err != nil { + return nil, err + } + + s.AddAttribute("followID", f.ID). + AddAttribute("followURI", f.URI) + + return f, nil +} diff --git a/internal/service/svc_impls/note_service_impl.go b/internal/service/svc_impls/note_service_impl.go new file mode 100644 index 0000000..21a59d0 --- /dev/null +++ b/internal/service/svc_impls/note_service_impl.go @@ -0,0 +1,98 @@ +package svc_impls + +import ( + "context" + "github.com/lysand-org/versia-go/internal/repository" + "github.com/lysand-org/versia-go/internal/service" + "slices" + + "git.devminer.xyz/devminer/unitel" + "github.com/go-logr/logr" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/internal/api_schema" + "github.com/lysand-org/versia-go/internal/entity" + "github.com/lysand-org/versia-go/internal/tasks" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +var _ service.NoteService = (*NoteServiceImpl)(nil) + +type NoteServiceImpl struct { + federationService service.FederationService + taskService service.TaskService + + repositories repository.Manager + + telemetry *unitel.Telemetry + + log logr.Logger +} + +func NewNoteServiceImpl(federationService service.FederationService, taskService service.TaskService, repositories repository.Manager, telemetry *unitel.Telemetry, log logr.Logger) *NoteServiceImpl { + return &NoteServiceImpl{ + federationService: federationService, + taskService: taskService, + repositories: 
repositories, + telemetry: telemetry, + log: log, + } +} + +func (i NoteServiceImpl) CreateNote(ctx context.Context, req api_schema.CreateNoteRequest) (*entity.Note, error) { + s := i.telemetry.StartSpan(ctx, "function", "service/svc_impls.NoteServiceImpl.CreateNote") + defer s.End() + ctx = s.Context() + + var n *entity.Note + + if err := i.repositories.Atomic(ctx, func(ctx context.Context, tx repository.Manager) error { + // FIXME: Use the user that created the note + author, err := tx.Users().GetLocalByID(ctx, uuid.MustParse("b6f4bcb5-ac5a-4a87-880a-c7f88f58a172")) + if err != nil { + return err + } + if author == nil { + return api_schema.ErrBadRequest(map[string]any{"reason": "author not found"}) + } + + mentionedUsers, err := i.repositories.Users().ResolveMultiple(ctx, req.Mentions) + if err != nil { + return err + } + + if slices.ContainsFunc(mentionedUsers, func(u *entity.User) bool { return u.ID == author.ID }) { + return api_schema.ErrBadRequest(map[string]any{"reason": "cannot mention self"}) + } + + n, err = tx.Notes().NewNote(ctx, author, req.Content, mentionedUsers) + if err != nil { + return err + } + + if err := i.taskService.ScheduleTask(ctx, tasks.FederateNote, tasks.FederateNoteData{NoteID: n.ID}); err != nil { + return err + } + + return nil + }); err != nil { + return nil, err + } + + return n, nil +} + +func (i NoteServiceImpl) GetNote(ctx context.Context, id uuid.UUID) (*entity.Note, error) { + s := i.telemetry.StartSpan(ctx, "function", "service/svc_impls.NoteServiceImpl.GetUserByID") + defer s.End() + ctx = s.Context() + + return i.repositories.Notes().GetByID(ctx, id) +} + +func (i NoteServiceImpl) ImportLysandNote(ctx context.Context, lNote *lysand.Note) (*entity.Note, error) { + s := i.telemetry.StartSpan(ctx, "function", "service/svc_impls.NoteServiceImpl.ImportLysandNote") + defer s.End() + ctx = s.Context() + + return i.repositories.Notes().ImportLysandNote(ctx, lNote) +} diff --git a/internal/service/svc_impls/task_service_impl.go 
b/internal/service/svc_impls/task_service_impl.go new file mode 100644 index 0000000..e10b994 --- /dev/null +++ b/internal/service/svc_impls/task_service_impl.go @@ -0,0 +1,49 @@ +package svc_impls + +import ( + "context" + "github.com/lysand-org/versia-go/internal/service" + + "git.devminer.xyz/devminer/unitel" + "github.com/go-logr/logr" + "github.com/lysand-org/versia-go/pkg/taskqueue" +) + +var _ service.TaskService = (*TaskServiceImpl)(nil) + +type TaskServiceImpl struct { + client *taskqueue.Client + + telemetry *unitel.Telemetry + log logr.Logger +} + +func NewTaskServiceImpl(client *taskqueue.Client, telemetry *unitel.Telemetry, log logr.Logger) *TaskServiceImpl { + return &TaskServiceImpl{ + client: client, + + telemetry: telemetry, + log: log, + } +} + +func (i TaskServiceImpl) ScheduleTask(ctx context.Context, type_ string, data any) error { + s := i.telemetry.StartSpan(ctx, "function", "service/svc_impls.TaskServiceImpl.ScheduleTask") + defer s.End() + ctx = s.Context() + + t, err := taskqueue.NewTask(type_, data) + if err != nil { + i.log.Error(err, "Failed to create task", "type", type_) + return err + } + + if err := i.client.Submit(ctx, t); err != nil { + i.log.Error(err, "Failed to schedule task", "type", type_, "taskID", t.ID) + return err + } + + i.log.V(2).Info("Scheduled task", "type", type_, "taskID", t.ID) + + return nil +} diff --git a/internal/service/svc_impls/user_service_impl.go b/internal/service/svc_impls/user_service_impl.go new file mode 100644 index 0000000..f265018 --- /dev/null +++ b/internal/service/svc_impls/user_service_impl.go @@ -0,0 +1,122 @@ +package svc_impls + +import ( + "context" + "crypto/ed25519" + "crypto/rand" + "fmt" + "github.com/lysand-org/versia-go/internal/repository" + "github.com/lysand-org/versia-go/internal/service" + "net/url" + + "git.devminer.xyz/devminer/unitel" + "github.com/go-logr/logr" + "github.com/google/uuid" + "github.com/lysand-org/versia-go/config" + 
"github.com/lysand-org/versia-go/ent/schema" + "github.com/lysand-org/versia-go/internal/entity" + "github.com/lysand-org/versia-go/internal/utils" + "github.com/lysand-org/versia-go/pkg/webfinger" +) + +var _ service.UserService = (*UserServiceImpl)(nil) + +type UserServiceImpl struct { + repositories repository.Manager + + federationService service.FederationService + + telemetry *unitel.Telemetry + + log logr.Logger +} + +func NewUserServiceImpl(repositories repository.Manager, federationService service.FederationService, telemetry *unitel.Telemetry, log logr.Logger) *UserServiceImpl { + return &UserServiceImpl{ + repositories: repositories, + federationService: federationService, + telemetry: telemetry, + log: log, + } +} + +func (i UserServiceImpl) WithRepositories(repositories repository.Manager) service.UserService { + return NewUserServiceImpl(repositories, i.federationService, i.telemetry, i.log) +} + +func (i UserServiceImpl) NewUser(ctx context.Context, username, password string) (*entity.User, error) { + s := i.telemetry.StartSpan(ctx, "function", "service/svc_impls.UserServiceImpl.NewUser") + defer s.End() + ctx = s.Context() + + if err := schema.ValidateUsername(username); err != nil { + return nil, err + } + + pub, priv, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + i.log.Error(err, "Failed to generate ed25519 key pair") + + return nil, err + } + + user, err := i.repositories.Users().NewUser(ctx, username, password, priv, pub) + if err != nil { + i.log.Error(err, "Failed to create user", "username", username) + + return nil, err + } + + i.log.V(2).Info("Create user", "id", user.ID, "uri", user.URI) + + return user, nil +} + +func (i UserServiceImpl) GetUserByID(ctx context.Context, id uuid.UUID) (*entity.User, error) { + s := i.telemetry.StartSpan(ctx, "function", "service/svc_impls.UserServiceImpl.GetUserByID") + defer s.End() + ctx = s.Context() + + return i.repositories.Users().LookupByIDOrUsername(ctx, id.String()) +} + +func (i 
UserServiceImpl) GetWebfingerForUser(ctx context.Context, userID string) (*webfinger.User, error) { + s := i.telemetry.StartSpan(ctx, "function", "service/svc_impls.UserServiceImpl.GetWebfingerForUser") + defer s.End() + ctx = s.Context() + + u, err := i.repositories.Users().LookupByIDOrUsername(ctx, userID) + if err != nil { + return nil, err + } + if u == nil { + return nil, fmt.Errorf("user not found") + } + + wf := &webfinger.User{ + UserID: webfinger.UserID{ + ID: u.ID.String(), + // FIXME: Move this away into a service or sth + Domain: config.C.PublicAddress.Host, + }, + URI: utils.UserAPIURL(u.ID).ToStd(), + } + + if u.Edges.AvatarImage != nil { + avatarURL, err := url.Parse(u.Edges.AvatarImage.URL) + if err != nil { + i.log.Error(err, "Failed to parse avatar URL") + + wf.Avatar = utils.DefaultAvatarURL(u.ID).ToStd() + wf.AvatarMIMEType = "image/svg+xml" + } else { + wf.Avatar = avatarURL + wf.AvatarMIMEType = u.Edges.AvatarImage.MimeType + } + } else { + wf.Avatar = utils.DefaultAvatarURL(u.ID).ToStd() + wf.AvatarMIMEType = "image/svg+xml" + } + + return wf, nil +} diff --git a/internal/tasks/federate_follow.go b/internal/tasks/federate_follow.go new file mode 100644 index 0000000..fe52fbc --- /dev/null +++ b/internal/tasks/federate_follow.go @@ -0,0 +1,11 @@ +package tasks + +import "context" + +type FederateFollowData struct { + FollowID string `json:"followID"` +} + +func (t *Handler) FederateFollow(ctx context.Context, data FederateNoteData) error { + return nil +} diff --git a/internal/tasks/federate_note.go b/internal/tasks/federate_note.go new file mode 100644 index 0000000..a673751 --- /dev/null +++ b/internal/tasks/federate_note.go @@ -0,0 +1,48 @@ +package tasks + +import ( + "context" + "github.com/lysand-org/versia-go/internal/repository" + + "github.com/google/uuid" + "github.com/lysand-org/versia-go/internal/entity" +) + +type FederateNoteData struct { + NoteID uuid.UUID `json:"noteID"` +} + +func (t *Handler) FederateNote(ctx context.Context, 
data FederateNoteData) error { + s := t.telemetry.StartSpan(ctx, "function", "tasks/Handler.FederateNote") + defer s.End() + ctx = s.Context() + + var n *entity.Note + if err := t.repositories.Atomic(ctx, func(ctx context.Context, tx repository.Manager) error { + var err error + n, err = tx.Notes().GetByID(ctx, data.NoteID) + if err != nil { + return err + } + + for _, uu := range n.Mentions { + if !uu.IsRemote { + t.log.V(2).Info("User is not remote", "user", uu.ID) + continue + } + + res, err := t.federationService.SendToInbox(ctx, n.Author, &uu, n.ToLysand()) + if err != nil { + t.log.Error(err, "Failed to send note to remote user", "res", res, "user", uu.ID) + } else { + t.log.V(2).Info("Sent note to remote user", "res", res, "user", uu.ID) + } + } + + return nil + }); err != nil { + return err + } + + return nil +} diff --git a/internal/tasks/handler.go b/internal/tasks/handler.go new file mode 100644 index 0000000..49e8400 --- /dev/null +++ b/internal/tasks/handler.go @@ -0,0 +1,53 @@ +package tasks + +import ( + "context" + "encoding/json" + "github.com/lysand-org/versia-go/internal/repository" + "github.com/lysand-org/versia-go/internal/service" + + "git.devminer.xyz/devminer/unitel" + "github.com/go-logr/logr" + "github.com/lysand-org/versia-go/pkg/taskqueue" +) + +const ( + FederateNote = "federate_note" + FederateFollow = "federate_follow" +) + +type Handler struct { + federationService service.FederationService + + repositories repository.Manager + + telemetry *unitel.Telemetry + log logr.Logger +} + +func NewHandler(federationService service.FederationService, repositories repository.Manager, telemetry *unitel.Telemetry, log logr.Logger) *Handler { + return &Handler{ + federationService: federationService, + + repositories: repositories, + + telemetry: telemetry, + log: log, + } +} + +func (t *Handler) Register(tq *taskqueue.Client) { + tq.RegisterHandler(FederateNote, parse(t.FederateNote)) + tq.RegisterHandler(FederateFollow, parse(t.FederateFollow)) 
+} + +func parse[T any](handler func(context.Context, T) error) func(context.Context, taskqueue.Task) error { + return func(ctx context.Context, task taskqueue.Task) error { + var data T + if err := json.Unmarshal(task.Payload, &data); err != nil { + return err + } + + return handler(ctx, data) + } +} diff --git a/internal/utils/mapper.go b/internal/utils/mapper.go new file mode 100644 index 0000000..707d5f0 --- /dev/null +++ b/internal/utils/mapper.go @@ -0,0 +1,52 @@ +package utils + +import "strings" + +func MapSlice[T any, V any](obj []T, transform func(T) V) []V { + vs := make([]V, 0, len(obj)) + + for _, o := range obj { + vs = append(vs, transform(o)) + } + + return vs +} + +type CombinedError struct { + Errors []error +} + +func (e CombinedError) Error() string { + sb := strings.Builder{} + + for i, err := range e.Errors { + sb.WriteString(err.Error()) + + if i < len(e.Errors)-1 { + sb.WriteString("\n") + } + } + + return sb.String() +} + +func MapErrorSlice[T any, V any](obj []T, transform func(T) (V, error)) ([]V, error) { + vs := make([]V, 0, len(obj)) + errs := make([]error, 0, len(obj)) + + for _, o := range obj { + v, err := transform(o) + if err != nil { + errs = append(errs, err) + continue + } + + vs = append(vs, v) + } + + if len(errs) > 0 { + return nil, CombinedError{Errors: errs} + } + + return vs, nil +} diff --git a/internal/utils/urls.go b/internal/utils/urls.go new file mode 100644 index 0000000..247812f --- /dev/null +++ b/internal/utils/urls.go @@ -0,0 +1,75 @@ +package utils + +import ( + "fmt" + "net/url" + + "github.com/google/uuid" + "github.com/lysand-org/versia-go/config" + "github.com/lysand-org/versia-go/pkg/lysand" +) + +var dicebearURL = &url.URL{ + Scheme: "https", + Host: "api.dicebear.com", + Path: "9.x/adventurer/svg", +} + +func UserAPIURL(uuid uuid.UUID) *lysand.URL { + newPath := &url.URL{Path: fmt.Sprintf("/api/users/%s/", uuid.String())} + return lysand.URLFromStd(config.C.PublicAddress.ResolveReference(newPath)) +} + 
+func DefaultAvatarURL(uuid uuid.UUID) *lysand.URL { + u := &url.URL{} + q := u.Query() + q.Set("seed", uuid.String()) + u.RawQuery = q.Encode() + + return lysand.URLFromStd(dicebearURL.ResolveReference(u)) +} + +func UserInboxAPIURL(uuid uuid.UUID) *lysand.URL { + newPath := &url.URL{Path: "./inbox"} + return UserAPIURL(uuid).ResolveReference(newPath) +} + +func UserOutboxAPIURL(uuid uuid.UUID) *lysand.URL { + newPath := &url.URL{Path: "./outbox"} + return UserAPIURL(uuid).ResolveReference(newPath) +} + +func UserFollowersAPIURL(uuid uuid.UUID) *lysand.URL { + newPath := &url.URL{Path: "./followers"} + return UserAPIURL(uuid).ResolveReference(newPath) +} + +func UserFollowingAPIURL(uuid uuid.UUID) *lysand.URL { + newPath := &url.URL{Path: "./following"} + return UserAPIURL(uuid).ResolveReference(newPath) +} + +func UserFeaturedAPIURL(uuid uuid.UUID) *lysand.URL { + newPath := &url.URL{Path: "./featured"} + return UserAPIURL(uuid).ResolveReference(newPath) +} + +func UserLikesAPIURL(uuid uuid.UUID) *lysand.URL { + newPath := &url.URL{Path: "./likes"} + return UserAPIURL(uuid).ResolveReference(newPath) +} + +func UserDislikesAPIURL(uuid uuid.UUID) *lysand.URL { + newPath := &url.URL{Path: "./dislikes"} + return UserAPIURL(uuid).ResolveReference(newPath) +} + +func FollowAPIURL(uuid uuid.UUID) *lysand.URL { + newPath := &url.URL{Path: fmt.Sprintf("/api/follows/%s/", uuid.String())} + return lysand.URLFromStd(config.C.PublicAddress.ResolveReference(newPath)) +} + +func NoteAPIURL(uuid uuid.UUID) *lysand.URL { + newPath := &url.URL{Path: fmt.Sprintf("/api/notes/%s/", uuid.String())} + return lysand.URLFromStd(config.C.PublicAddress.ResolveReference(newPath)) +} diff --git a/internal/validators/val_impls/body_validator_impl.go b/internal/validators/val_impls/body_validator_impl.go new file mode 100644 index 0000000..2f309eb --- /dev/null +++ b/internal/validators/val_impls/body_validator_impl.go @@ -0,0 +1,90 @@ +package val_impls + +import ( + "errors" + 
"github.com/lysand-org/versia-go/internal/validators" + "reflect" + "strings" + + "github.com/go-logr/logr" + en_locale "github.com/go-playground/locales/en" + universal_translator "github.com/go-playground/universal-translator" + "github.com/go-playground/validator/v10" + en_translations "github.com/go-playground/validator/v10/translations/en" + "github.com/lysand-org/versia-go/ent/schema" + "github.com/lysand-org/versia-go/internal/api_schema" +) + +var _ validators.BodyValidator = (*BodyValidatorImpl)(nil) + +type BodyValidatorImpl struct { + validator *validator.Validate + translator *universal_translator.UniversalTranslator + enTranslator universal_translator.Translator + + log logr.Logger +} + +func NewBodyValidator(log logr.Logger) *BodyValidatorImpl { + en := en_locale.New() + translator := universal_translator.New(en, en) + trans, ok := translator.GetTranslator("en") + if !ok { + panic("failed to get \"en\" translator") + } + + validate := validator.New(validator.WithRequiredStructEnabled()) + if err := en_translations.RegisterDefaultTranslations(validate, trans); err != nil { + panic("failed to register default translations") + } + + validate.RegisterTagNameFunc(func(fld reflect.StructField) string { + name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0] + if name == "-" { + return "" + } + return name + }) + + if err := validate.RegisterValidation("username_regex", func(fl validator.FieldLevel) bool { + return schema.ValidateUsername(fl.Field().String()) == nil + }); err != nil { + panic("failed to register username_regex validator") + } + + if err := validate.RegisterTranslation("username_regex", trans, func(ut universal_translator.Translator) error { + return trans.Add("user_regex", "{0} must match '^[a-z0-9_-]+$'!", true) + }, func(ut universal_translator.Translator, fe validator.FieldError) string { + t, _ := ut.T("user_regex", fe.Field()) + return t + }); err != nil { + panic("failed to register user_regex translation") + } + + return 
&BodyValidatorImpl{
+		validator:    validate,
+		translator:   translator,
+		enTranslator: trans,
+		// log must be assigned, otherwise Validate logs through a zero-value
+		// logr.Logger and the constructor parameter is silently dropped.
+		log: log,
+	}
+}
+
+func (i BodyValidatorImpl) Validate(v any) error {
+	err := i.validator.Struct(v)
+	if err == nil {
+		return nil
+	}
+
+	var invalidValidationError *validator.InvalidValidationError
+	if errors.As(err, &invalidValidationError) {
+		panic(invalidValidationError)
+	}
+
+	i.log.Error(err, "Failed to validate object")
+
+	errs := make([]string, 0)
+	for _, err := range err.(validator.ValidationErrors) {
+		errs = append(errs, err.Translate(i.enTranslator))
+	}
+
+	return api_schema.ErrBadRequest(map[string]any{"validation": errs})
+}
diff --git a/internal/validators/val_impls/request_validator_impl.go b/internal/validators/val_impls/request_validator_impl.go
new file mode 100644
index 0000000..17815dd
--- /dev/null
+++ b/internal/validators/val_impls/request_validator_impl.go
@@ -0,0 +1,109 @@
+package val_impls
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"git.devminer.xyz/devminer/unitel"
+	"github.com/go-logr/logr"
+	"github.com/gofiber/fiber/v2"
+	"github.com/lysand-org/versia-go/internal/repository"
+	"github.com/lysand-org/versia-go/internal/validators"
+	"github.com/lysand-org/versia-go/pkg/lysand"
+	"github.com/valyala/fasthttp/fasthttpadaptor"
+	"io"
+	"net/http"
+)
+
+var (
+	ErrInvalidSignature = errors.New("invalid signature")
+
+	_ validators.RequestValidator = (*RequestValidatorImpl)(nil)
+)
+
+type RequestValidatorImpl struct {
+	repositories repository.Manager
+
+	telemetry *unitel.Telemetry
+	log       logr.Logger
+}
+
+func NewRequestValidator(repositories repository.Manager, telemetry *unitel.Telemetry, log logr.Logger) *RequestValidatorImpl {
+	return &RequestValidatorImpl{
+		repositories: repositories,
+
+		telemetry: telemetry,
+		log:       log,
+	}
+}
+
+func (i RequestValidatorImpl) Validate(ctx context.Context, r *http.Request) error {
+	s := i.telemetry.StartSpan(ctx, "function", "validator/val_impls.RequestValidatorImpl.Validate")
+	defer s.End()
+	ctx =
s.Context() + + r = r.WithContext(ctx) + + date, sigHeader, err := lysand.ExtractFederationHeaders(r.Header) + if err != nil { + return err + } + + // TODO: Fetch user from database instead of using the URI + user, err := i.repositories.Users().Resolve(ctx, lysand.URLFromStd(sigHeader.KeyID)) + if err != nil { + return err + } + + body, err := copyBody(r) + if err != nil { + return err + } + + if !(lysand.Verifier{PublicKey: user.PublicKey}).Verify(r.Method, date, r.Host, r.URL.Path, body, sigHeader) { + i.log.Info("signature verification failed", "user", user.URI, "ur", r.URL.Path) + s.CaptureError(ErrInvalidSignature) + + return ErrInvalidSignature + } else { + i.log.V(2).Info("signature verification succeeded", "user", user.URI, "ur", r.URL.Path) + } + + return nil +} + +func (i RequestValidatorImpl) ValidateFiberCtx(ctx context.Context, c *fiber.Ctx) error { + s := i.telemetry.StartSpan(ctx, "function", "validator/val_impls.RequestValidatorImpl.ValidateFiberCtx") + defer s.End() + ctx = s.Context() + + r, err := convertToStdRequest(c) + if err != nil { + return err + } + + return i.Validate(ctx, r) +} + +func convertToStdRequest(c *fiber.Ctx) (*http.Request, error) { + stdReq := &http.Request{} + if err := fasthttpadaptor.ConvertRequest(c.Context(), stdReq, true); err != nil { + return nil, err + } + + return stdReq, nil +} + +func copyBody(req *http.Request) ([]byte, error) { + body, err := io.ReadAll(req.Body) + if err != nil { + return nil, err + } + + if err := req.Body.Close(); err != nil { + return nil, err + } + + req.Body = io.NopCloser(bytes.NewBuffer(body)) + return body, nil +} diff --git a/internal/validators/validator.go b/internal/validators/validator.go new file mode 100644 index 0000000..0e07a96 --- /dev/null +++ b/internal/validators/validator.go @@ -0,0 +1,16 @@ +package validators + +import ( + "context" + "github.com/gofiber/fiber/v2" + "net/http" +) + +type BodyValidator interface { + Validate(v any) error +} + +type RequestValidator 
interface { + Validate(ctx context.Context, r *http.Request) error + ValidateFiberCtx(ctx context.Context, c *fiber.Ctx) error +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..61c948e --- /dev/null +++ b/main.go @@ -0,0 +1,324 @@ +package main + +import ( + "context" + "crypto/ed25519" + "crypto/rand" + "database/sql" + "database/sql/driver" + "github.com/lysand-org/versia-go/internal/handlers/follow_handler" + "github.com/lysand-org/versia-go/internal/handlers/meta_handler" + "github.com/lysand-org/versia-go/internal/handlers/note_handler" + "github.com/lysand-org/versia-go/internal/repository" + "github.com/lysand-org/versia-go/internal/repository/repo_impls" + "github.com/lysand-org/versia-go/internal/service/svc_impls" + "github.com/lysand-org/versia-go/internal/validators/val_impls" + "net/http" + "os" + "os/signal" + "strings" + "sync" + "time" + + "entgo.io/ent/dialect" + entsql "entgo.io/ent/dialect/sql" + "git.devminer.xyz/devminer/unitel" + "github.com/go-logr/logr" + "github.com/go-logr/zerologr" + "github.com/gofiber/fiber/v2" + "github.com/gofiber/fiber/v2/middleware/cors" + "github.com/google/uuid" + pgx "github.com/jackc/pgx/v5/stdlib" + "github.com/lysand-org/versia-go/config" + "github.com/lysand-org/versia-go/ent" + "github.com/lysand-org/versia-go/ent/user" + "github.com/lysand-org/versia-go/internal/database" + "github.com/lysand-org/versia-go/internal/handlers/user_handler" + "github.com/lysand-org/versia-go/internal/tasks" + "github.com/lysand-org/versia-go/internal/utils" + "github.com/lysand-org/versia-go/pkg/lysand" + "github.com/lysand-org/versia-go/pkg/taskqueue" + "github.com/nats-io/nats.go" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "modernc.org/sqlite" +) + +func init() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) +} + +func main() { + zerolog.SetGlobalLevel(zerolog.TraceLevel) + zerologr.NameFieldName = "logger" + 
zerologr.NameSeparator = "/" + zerologr.SetMaxV(2) + + config.Load() + + tel, err := unitel.Initialize(config.C.Telemetry) + if err != nil { + log.Fatal().Err(err).Msg("failed to initialize telemetry") + } + + federationClient := lysand.NewClient(lysand.WithHTTPClient(&http.Client{ + Transport: tel.NewTracedTransport( + http.DefaultTransport, + false, + []string{"origin", "date", "signature"}, + []string{"host", "date", "signature"}, + ), + }), lysand.WithLogger(zerologr.New(&log.Logger).WithName("federation-client"))) + + log.Debug().Msg("Opening database connection") + var db *ent.Client + if strings.HasPrefix(config.C.DatabaseURI, "postgres://") { + db, err = openDB(tel, false, config.C.DatabaseURI) + } else { + db, err = openDB(tel, true, config.C.DatabaseURI) + } + if err != nil { + log.Fatal().Err(err).Msg("failed opening connection to the database") + } + defer db.Close() + + nc, err := nats.Connect(config.C.NATSURI) + if err != nil { + log.Fatal().Err(err).Msg("failed to connect to NATS") + } + + log.Debug().Msg("Starting taskqueue client") + tq, err := taskqueue.NewClient(context.Background(), "versia-go", nc, tel, zerologr.New(&log.Logger).WithName("taskqueue-client")) + if err != nil { + log.Fatal().Err(err).Msg("failed to create taskqueue client") + } + defer tq.Close() + + log.Debug().Msg("Running schema migration") + if err := migrateDB(db, zerologr.New(&log.Logger).WithName("migrate-db"), tel); err != nil { + log.Fatal().Err(err).Msg("failed to run schema migration") + } + + // Stateless services + + federationService := svc_impls.NewFederationServiceImpl(federationClient, tel, zerologr.New(&log.Logger).WithName("federation-service")) + taskService := svc_impls.NewTaskServiceImpl(tq, tel, zerologr.New(&log.Logger).WithName("task-service")) + + // Manager + + repos := repo_impls.NewManagerImpl(db, tel, zerologr.New(&log.Logger).WithName("repositories"), func(db *ent.Client, log logr.Logger, telemetry *unitel.Telemetry) repository.UserRepository { + 
return repo_impls.NewUserRepositoryImpl(federationService, db, log, telemetry) + }, repo_impls.NewNoteRepositoryImpl, repo_impls.NewFollowRepositoryImpl) + + // Validators + + bodyValidator := val_impls.NewBodyValidator(zerologr.New(&log.Logger).WithName("validation-service")) + requestValidator := val_impls.NewRequestValidator(repos, tel, zerologr.New(&log.Logger).WithName("request-validator")) + + // Services + + userService := svc_impls.NewUserServiceImpl(repos, federationService, tel, zerologr.New(&log.Logger).WithName("user-service")) + noteService := svc_impls.NewNoteServiceImpl(federationService, taskService, repos, tel, zerologr.New(&log.Logger).WithName("note-service")) + followService := svc_impls.NewFollowServiceImpl(federationService, repos, tel, zerologr.New(&log.Logger).WithName("follow-service")) + inboxService := svc_impls.NewInboxService(repos, federationService, tel, zerologr.New(&log.Logger).WithName("inbox-service")) + + // Handlers + + userHandler := user_handler.New(userService, federationService, inboxService, bodyValidator, requestValidator, zerologr.New(&log.Logger).WithName("user-handler")) + noteHandler := note_handler.New(noteService, bodyValidator, zerologr.New(&log.Logger).WithName("notes-handler")) + followHandler := follow_handler.New(followService, federationService, zerologr.New(&log.Logger).WithName("follow-handler")) + metaHandler := meta_handler.New(zerologr.New(&log.Logger).WithName("meta-handler")) + + // Initialization + + if err := initServerActor(db, tel); err != nil { + log.Fatal().Err(err).Msg("failed to initialize server actor") + } + + web := fiber.New(fiber.Config{ + ProxyHeader: "X-Forwarded-For", + ErrorHandler: fiberErrorHandler, + DisableStartupMessage: true, + AppName: "versia-go", + EnablePrintRoutes: true, + }) + + web.Use(cors.New(cors.Config{ + AllowOriginsFunc: func(origin string) bool { + return true + }, + AllowMethods: "GET,POST,PUT,DELETE,PATCH", + AllowHeaders: "Origin, Content-Type, Accept, 
Authorization, b3, traceparent, sentry-trace, baggage", + AllowCredentials: true, + ExposeHeaders: "", + MaxAge: 0, + })) + + web.Use(tel.FiberMiddleware(unitel.FiberMiddlewareConfig{ + Repanic: false, + WaitForDelivery: false, + Timeout: 5 * time.Second, + // host for incoming requests + TraceRequestHeaders: []string{"origin", "date", "signature", "host"}, + // origin for outgoing requests + TraceResponseHeaders: []string{"origin", "date", "signature", "origin"}, + // IgnoredRoutes: nil, + })) + web.Use(unitel.RequestLogger(log.Logger, true, true)) + + log.Debug().Msg("Registering handlers") + + userHandler.Register(web.Group("/")) + noteHandler.Register(web.Group("/")) + followHandler.Register(web.Group("/")) + metaHandler.Register(web.Group("/")) + + wg := sync.WaitGroup{} + wg.Add(2) + + // TODO: Run these in separate processes, if wanted + go func() { + defer wg.Done() + + log.Debug().Msg("Starting taskqueue consumer") + + tasks.NewHandler(federationService, repos, tel, zerologr.New(&log.Logger).WithName("task-handler")). 
+ Register(tq) + + if err := tq.Start(context.Background()); err != nil { + log.Fatal().Err(err).Msg("failed to start taskqueue client") + } + }() + + go func() { + defer wg.Done() + + log.Debug().Msg("Starting server") + if err := web.ListenTLS(":8443", "cert.pem", "key.pem"); err != nil { + log.Fatal().Err(err).Msg("Failed to start server") + } + }() + + signalCh := make(chan os.Signal, 1) + signal.Notify(signalCh, os.Interrupt) + <-signalCh + + log.Info().Msg("Shutting down") + + tq.Close() + if err := web.Shutdown(); err != nil { + log.Error().Err(err).Msg("Failed to shutdown server") + } + + wg.Wait() +} + +func openDB(t *unitel.Telemetry, isSqlite bool, uri string) (*ent.Client, error) { + s := t.StartSpan(context.Background(), "function", "main.openDB") + defer s.End() + + var drv driver.Driver + var dialectType string + var dbType string + + if isSqlite { + log.Debug().Msg("Opening SQLite database connection") + drv = &sqliteDriver{Driver: &sqlite.Driver{}} + dialectType = dialect.SQLite + dbType = "sqlite" + } else { + log.Debug().Msg("Opening PostgreSQL database connection") + drv = &pgx.Driver{} + dialectType = dialect.Postgres + dbType = "postgres" + } + + sql.Register(dialectType+"-traced", t.TraceSQL(drv, dbType, false)) + + db, err := sql.Open(dialectType+"-traced", uri) + if err != nil { + return nil, err + } + + entDrv := entsql.OpenDB(dialectType, db) + return ent.NewClient(ent.Driver(entDrv)), nil +} + +func migrateDB(db *ent.Client, log logr.Logger, telemetry *unitel.Telemetry) error { + s := telemetry.StartSpan(context.Background(), "function", "main.migrateDB") + defer s.End() + ctx := s.Context() + + log.V(1).Info("Migrating database schema") + if err := db.Schema.Create(ctx); err != nil { + log.Error(err, "Failed to migrate database schema") + return err + } + + log.V(1).Info("Database migration complete") + + return nil +} + +func initServerActor(db *ent.Client, telemetry *unitel.Telemetry) error { + s := 
telemetry.StartSpan(context.Background(), "function", "main.initServerActor") + defer s.End() + ctx := s.Context() + + tx, err := database.BeginTx(ctx, db, telemetry) + if err != nil { + return err + } + defer func(tx *database.Tx) { + if err := tx.Finish(); err != nil { + log.Error().Err(err).Msg("Failed to finish transaction") + } + }(tx) + ctx = tx.Context() + + _, err = tx.User.Query(). + Where(user.Username("actor")). + Only(ctx) + if err != nil && !ent.IsNotFound(err) { + log.Error().Err(err).Msg("Failed to query user") + + return err + } + + if ent.IsNotFound(err) { + pub, priv, err := ed25519.GenerateKey(rand.Reader) + if err != nil { + log.Error().Err(err).Msg("Failed to generate keypair") + + return err + } + + uid := uuid.New() + + err = tx.User.Create(). + SetID(uid). + SetUsername("actor"). + SetIsRemote(false). + SetURI(utils.UserAPIURL(uid).String()). + SetIndexable(false). + SetPrivacyLevel(user.PrivacyLevelPrivate). + SetPublicKey(pub). + SetPrivateKey(priv). + SetInbox(utils.UserInboxAPIURL(uid).String()). + SetOutbox(utils.UserOutboxAPIURL(uid).String()). + SetFeatured(utils.UserFeaturedAPIURL(uid).String()). + SetFollowers(utils.UserFollowersAPIURL(uid).String()). + SetFollowing(utils.UserFollowingAPIURL(uid).String()). 
+ Exec(ctx) + if err != nil { + log.Error().Err(err).Msg("Failed to create user") + + return err + } + } + + tx.MarkForCommit() + + return tx.Finish() +} diff --git a/pkg/lysand/action_follow.go b/pkg/lysand/action_follow.go new file mode 100644 index 0000000..1322441 --- /dev/null +++ b/pkg/lysand/action_follow.go @@ -0,0 +1,51 @@ +package lysand + +import "encoding/json" + +type Follow struct { + Entity + + // Author is the URL to the user that triggered the follow + Author *URL `json:"author"` + // Followee is the URL to the user that is being followed + Followee *URL `json:"followee"` +} + +func (f Follow) MarshalJSON() ([]byte, error) { + type follow Follow + f2 := follow(f) + f2.Type = "Follow" + return json.Marshal(f2) +} + +type FollowAccept struct { + Entity + + // Author is the URL to the user that accepted the follow + Author *URL `json:"author"` + // Follower is the URL to the user that is now following the followee + Follower *URL `json:"follower"` +} + +func (f FollowAccept) MarshalJSON() ([]byte, error) { + type followAccept FollowAccept + f2 := followAccept(f) + f2.Type = "FollowAccept" + return json.Marshal(f2) +} + +type FollowReject struct { + Entity + + // Author is the URL to the user that rejected the follow + Author *URL `json:"author"` + // Follower is the URL to the user that is no longer following the followee + Follower *URL `json:"follower"` +} + +func (f FollowReject) MarshalJSON() ([]byte, error) { + type followReject FollowReject + f2 := followReject(f) + f2.Type = "FollowReject" + return json.Marshal(f2) +} diff --git a/pkg/lysand/action_undo.go b/pkg/lysand/action_undo.go new file mode 100644 index 0000000..0e78164 --- /dev/null +++ b/pkg/lysand/action_undo.go @@ -0,0 +1,19 @@ +package lysand + +import "encoding/json" + +type Undo struct { + Entity + + // Author is the URL to the user that triggered the undo action + Author *URL `json:"author"` + // Object is the URL to the object that was undone + Object *URL `json:"object"` +} + 
+func (u Undo) MarshalJSON() ([]byte, error) { + type undo Undo + u2 := undo(u) + u2.Type = "Undo" + return json.Marshal(u2) +} diff --git a/pkg/lysand/actor_user.go b/pkg/lysand/actor_user.go new file mode 100644 index 0000000..78adf31 --- /dev/null +++ b/pkg/lysand/actor_user.go @@ -0,0 +1,150 @@ +package lysand + +import ( + "bytes" + "context" + "crypto/ed25519" + "encoding/json" + "fmt" + "net/http" + "net/url" + "time" +) + +// User represents a user object in the Lysand protocol. For more information, see the [Spec]. +// +// [Spec]: https://lysand.org/objects/user +type User struct { + Entity + + // PublicKey is the public key of the user. + // https://lysand.org/objects/user#public-key + PublicKey PublicKey `json:"public_key"` + + // DisplayName is the display name of the user. + // https://lysand.org/objects/user#display-name + DisplayName *string `json:"display_name,omitempty"` + + // Username is the username of the user. Must be unique on the instance and match the following regex: ^[a-z0-9_-]+$ + // https://lysand.org/objects/user#username + Username string `json:"username"` + + // Indexable is a boolean that indicates whether the user is indexable by search engines. + // https://lysand.org/objects/user#indexable + Indexable bool `json:"indexable"` + + // ManuallyApprovesFollowers is a boolean that indicates whether the user manually approves followers. + // https://lysand.org/objects/user#manually-approves-followers + ManuallyApprovesFollowers bool `json:"manually_approves_followers"` + + // Avatar is the avatar of the user in different image content types. + // https://lysand.org/objects/user#avatar + Avatar ImageContentTypeMap `json:"avatar,omitempty"` + + // Header is the header image of the user in different image content types. + // https://lysand.org/objects/user#header + Header ImageContentTypeMap `json:"header,omitempty"` + + // Bio is the biography of the user in different text content types. 
+ // https://lysand.org/objects/user#bio + Bio TextContentTypeMap `json:"bio"` + + // Fields is a list of fields that the user has filled out. + // https://lysand.org/objects/user#fields + Fields []Field `json:"fields,omitempty"` + + // Featured is the featured posts of the user. + // https://lysand.org/objects/user#featured + Featured *URL `json:"featured"` + + // Followers is the followers of the user. + // https://lysand.org/objects/user#followers + Followers *URL `json:"followers"` + + // Following is the users that the user is following. + // https://lysand.org/objects/user#following + Following *URL `json:"following"` + + // Likes is the likes of the user. + // https://lysand.org/objects/user#likes + Likes *URL `json:"likes"` + + // Dislikes is the dislikes of the user. + // https://lysand.org/objects/user#dislikes + Dislikes *URL `json:"dislikes"` + + // Inbox is the inbox of the user. + // https://lysand.org/objects/user#posts + Inbox *URL `json:"inbox"` + + // Outbox is the outbox of the user. 
+ // https://lysand.org/objects/user#outbox + Outbox *URL `json:"outbox"` +} + +func (u User) MarshalJSON() ([]byte, error) { + type user User + u2 := user(u) + u2.Type = "User" + return json.Marshal(u2) +} + +type Field struct { + Key TextContentTypeMap `json:"key"` + Value TextContentTypeMap `json:"value"` +} + +func (c *FederationClient) GetUser(ctx context.Context, uri *url.URL) (*User, error) { + resp, body, err := c.rawGET(ctx, uri) + if err != nil { + return nil, err + } + + user := &User{} + if err := json.Unmarshal(body, user); err != nil { + return nil, err + } + + date, sigHeader, err := ExtractFederationHeaders(resp.Header) + if err != nil { + return nil, err + } + + v := Verifier{ed25519.PublicKey(user.PublicKey.PublicKey)} + if !v.Verify("GET", date, uri.Host, uri.Path, body, sigHeader) { + c.log.V(2).Info("signature verification failed", "user", user.URI.String()) + return nil, fmt.Errorf("signature verification failed") + } + c.log.V(2).Info("signature verification succeeded", "user", user.URI.String()) + + return user, nil +} + +func (c *FederationClient) SendToInbox(ctx context.Context, signer Signer, user *User, object any) ([]byte, error) { + uri := user.Inbox.ToStd() + + body, err := json.Marshal(object) + if err != nil { + return nil, err + } + + date := time.Now() + + sigData := NewSignatureData("POST", date, uri.Host, uri.Path, hashSHA256(body)) + sig := signer.Sign(*sigData) + + req, err := http.NewRequestWithContext(ctx, "POST", uri.String(), bytes.NewReader(body)) + if err != nil { + return nil, err + } + + req.Header.Set("Date", TimeFromStd(date).String()) + req.Header.Set("Signature", sig.String()) + + _, respBody, err := c.doReq(req) + if err != nil { + return nil, err + } + + return respBody, nil +} diff --git a/pkg/lysand/attachment.go b/pkg/lysand/attachment.go new file mode 100644 index 0000000..0799f7b --- /dev/null +++ b/pkg/lysand/attachment.go @@ -0,0 +1,26 @@ +package lysand + +// Attachment is a file or other piece of content 
that is attached to a post. For more information, see the [Spec]. +// +// [Spec]: https://lysand.org/structures/content-format +type Attachment struct { + // URL to the attachment + Content string `json:"content"` + Description string `json:"description"` + Hash DataHash `json:"hash"` + Size int `json:"size"` + + // BlurHash is available when the content type is an image + BlurHash *string `json:"blurhash,omitempty"` + // BlurHash is available when the content type is an image + Height *int `json:"height,omitempty"` + // BlurHash is available when the content type is an image + Width *int `json:"width,omitempty"` + + // TODO: Figure out when this is available + FPS *int `json:"fps,omitempty"` +} + +type DataHash struct { + SHA256 string `json:"sha256"` +} diff --git a/pkg/lysand/content_types.go b/pkg/lysand/content_types.go new file mode 100644 index 0000000..a28850d --- /dev/null +++ b/pkg/lysand/content_types.go @@ -0,0 +1,85 @@ +package lysand + +import ( + "encoding/json" + "slices" + + "github.com/rs/zerolog/log" +) + +var ( + validTextContentTypes = []string{"text/html", "text/plain"} + validImageContentTypes = []string{"image/png", "image/jpeg", "image/gif", "image/svg+xml"} +) + +// ContentTypeMap is a map of content types to their respective content. 
+type ContentTypeMap[T any] map[string]T + +func (m *ContentTypeMap[T]) unmarshalJSON(raw []byte, valid []string) error { + var cm map[string]json.RawMessage + if err := json.Unmarshal(raw, &cm); err != nil { + return err + } + + *m = make(ContentTypeMap[T]) + + for k, v := range cm { + if !slices.Contains(valid, k) { + // TODO: replace with logr + log.Debug().Caller().Str("mimetype", k).Msg("unexpected content type, skipping") + continue + } + + var c T + if err := json.Unmarshal(v, &c); err != nil { + return err + } + (*m)[k] = c + } + + return nil +} + +func (m ContentTypeMap[T]) getPreferred(preferred []string) *T { + for _, v := range preferred { + if c, ok := m[v]; ok { + return &c + } + } + + return nil +} + +type TextContent struct { + Content string `json:"content"` +} +type TextContentTypeMap ContentTypeMap[TextContent] + +func (t *TextContentTypeMap) UnmarshalJSON(data []byte) error { + return (*ContentTypeMap[TextContent])(t).unmarshalJSON(data, validTextContentTypes) +} + +func (t TextContentTypeMap) String() string { + if c := (ContentTypeMap[TextContent])(t).getPreferred(validTextContentTypes); c != nil { + return c.Content + } + + return "" +} + +type ImageContent struct { + Content *URL `json:"content"` +} +type ImageContentTypeMap ContentTypeMap[ImageContent] + +func (i *ImageContentTypeMap) UnmarshalJSON(data []byte) error { + return (*ContentTypeMap[ImageContent])(i).unmarshalJSON(data, validImageContentTypes) +} + +func (i ImageContentTypeMap) String() string { + if c := (ContentTypeMap[ImageContent])(i).getPreferred(validImageContentTypes); c != nil { + return c.Content.String() + } + + return "" +} diff --git a/pkg/lysand/crypto.go b/pkg/lysand/crypto.go new file mode 100644 index 0000000..5dca960 --- /dev/null +++ b/pkg/lysand/crypto.go @@ -0,0 +1,92 @@ +package lysand + +import ( + "bytes" + "crypto/ed25519" + "crypto/sha256" + "fmt" + "io" + "log" + "net/http" + "time" +) + +func (c *FederationClient) ValidateSignatureHeader(req 
*http.Request) (bool, error) { + date, sigHeader, err := ExtractFederationHeaders(req.Header) + if err != nil { + return false, err + } + + // TODO: Fetch user from database instead of using the URI + user, err := c.GetUser(req.Context(), sigHeader.KeyID) + if err != nil { + return false, err + } + + body, err := copyBody(req) + if err != nil { + return false, err + } + + v := Verifier{ed25519.PublicKey(user.PublicKey.PublicKey)} + valid := v.Verify(req.Method, date, req.Host, req.URL.Path, body, sigHeader) + + return valid, nil +} + +func ExtractFederationHeaders(h http.Header) (time.Time, *SignatureHeader, error) { + gotDates := h.Values("date") + var date *Time + for i, raw := range gotDates { + if parsed, err := ParseTime(raw); err != nil { + log.Printf("invalid date[%d] header: %s", i, raw) + continue + } else { + date = &parsed + break + } + } + if date == nil { + return time.Time{}, nil, fmt.Errorf("missing date header") + } + + gotSignature := h.Get("signature") + if gotSignature == "" { + return date.ToStd(), nil, fmt.Errorf("missing signature header") + } + sigHeader, err := ParseSignatureHeader(gotSignature) + if err != nil { + return date.ToStd(), nil, err + } + + return date.ToStd(), sigHeader, nil +} + +func hashSHA256(data []byte) []byte { + h := sha256.New() + h.Write(data) + return h.Sum(nil) +} + +func must[In any, Out any](fn func(In) (Out, error), v In) Out { + out, err := fn(v) + if err != nil { + panic(err) + } + + return out +} + +func copyBody(req *http.Request) ([]byte, error) { + body, err := io.ReadAll(req.Body) + if err != nil { + return nil, err + } + + if err := req.Body.Close(); err != nil { + return nil, err + } + + req.Body = io.NopCloser(bytes.NewBuffer(body)) + return body, nil +} diff --git a/pkg/lysand/crypto_test.go b/pkg/lysand/crypto_test.go new file mode 100644 index 0000000..09cba1f --- /dev/null +++ b/pkg/lysand/crypto_test.go @@ -0,0 +1,100 @@ +package lysand + +import ( + "crypto/ed25519" + "crypto/x509" + 
"encoding/base64" + "net/url" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +func TestFederationClient_ValidateSignatureHeader(t *testing.T) { + var ( + bobPrivBytes = must(base64.StdEncoding.DecodeString, "MC4CAQAwBQYDK2VwBCIEINOATgmaya61Ha9OEE+DD3RnOEqDaHyQ3yLf5upwskUU") + bobPubBytes = must(base64.StdEncoding.DecodeString, "MCowBQYDK2VwAyEAQ08Z/FJ5f16o8mthLaFZMo4ssn0fJ7c+bipNYm3kId4=") + ) + + bobPub := must(x509.ParsePKIXPublicKey, bobPubBytes).(ed25519.PublicKey) + bobPriv := must(x509.ParsePKCS8PrivateKey, bobPrivBytes).(ed25519.PrivateKey) + + date := time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC) + body := []byte("hello") + + sigData := NewSignatureData("POST", date, "example2.com", "/users/bob", hashSHA256(body)) + + sig := Signer{PrivateKey: bobPriv, UserURL: &url.URL{Scheme: "https", Host: "example.com", Path: "/users/bob"}}. + Sign(*sigData) + + t.Run("validate against itself", func(t *testing.T) { + v := Verifier{ + PublicKey: bobPub, + } + + if !v.Verify("POST", date, "example2.com", "/users/bob", body, sig) { + t.Error("signature verification failed") + } + }) + + t.Run("validate against @lysand/api JS implementation", func(t *testing.T) { + expectedSignedString := `(request-target): post /users/bob +host: example2.com +date: 1970-01-01T00:00:00.000Z +digest: SHA-256=LPJNul+wow4m6DsqxbninhsWHlwfp0JecwQzYpOLmCQ= +` + assert.Equal(t, expectedSignedString, sigData.String()) + + expectedSignatureHeader := `keyId="https://example.com/users/bob",algorithm="ed25519",headers="(request-target) host date digest",signature="PbVicu1spnATYUznWn6N5ebNUC+w94U9k6y4dncLsr6hNfUD8CLInbUSkgR3AZrCWEZ+Md2+Lch70ofiSqXgAQ=="` + assert.Equal(t, expectedSignatureHeader, sig.String()) + }) +} + +func TestSignatureInterop(t *testing.T) { + var ( + bobPubBytes = must(base64.StdEncoding.DecodeString, "MCowBQYDK2VwAyEAgKNt+9eyOXdb7MSrrmHlsFD2H9NGwC+56PjpWD46Tcs=") + bobPrivBytes = must(base64.StdEncoding.DecodeString, 
"MC4CAQAwBQYDK2VwBCIEII+nkwT3nXwBp9FEE0q95RBBfikf6UTzPzdH2yrtIvL1") + ) + + bobPub := must(x509.ParsePKIXPublicKey, bobPubBytes).(ed25519.PublicKey) + bobPriv := must(x509.ParsePKCS8PrivateKey, bobPrivBytes).(ed25519.PrivateKey) + + signedString := `(request-target): post /api/users/ec042557-8c30-492d-87d6-9e6495993072/inbox +host: lysand-test.i.devminer.xyz +date: 2024-07-25T21:03:24.866Z +digest: SHA-256=mPN5WKMoC4k3zor6FPTJUhDQ1JKX6zqA2QfEGh3omuc= +` + method := "POST" + dateHeader := "2024-07-25T21:03:24.866Z" + date := must(ParseTime, dateHeader) + host := "lysand-test.i.devminer.xyz" + path := "/api/users/ec042557-8c30-492d-87d6-9e6495993072/inbox" + body := []byte(`{"type":"Follow","id":"2265b3b2-a176-4b20-8fcf-ac82cf2efd7d","author":"https://lysand.i.devminer.xyz/users/0190d697-c83a-7376-8d15-0f77fd09e180","followee":"https://lysand-test.i.devminer.xyz/api/users/ec042557-8c30-492d-87d6-9e6495993072/","created_at":"2024-07-25T21:03:24.863Z","uri":"https://lysand.i.devminer.xyz/follows/2265b3b2-a176-4b20-8fcf-ac82cf2efd7d"}`) + signatureHeader := `keyId="https://lysand.i.devminer.xyz/users/0190d697-c83a-7376-8d15-0f77fd09e180",algorithm="ed25519",headers="(request-target) host date digest",signature="KUkKYexLk2hOfE+NVIacLDHSJP2QpX4xJGclHhQIM39ce2or7UJauRtCL8eWrhpSgQdVPk11bYhvvi8fdCruBw=="` + + sigData := NewSignatureData(method, date.ToStd(), host, path, hashSHA256(body)) + assert.Equal(t, signedString, sigData.String()) + + t.Run("signature header parsing", func(t *testing.T) { + parsedSignatureHeader, err := ParseSignatureHeader(signatureHeader) + if err != nil { + t.Error(err) + } + assert.Equal(t, "https://lysand.i.devminer.xyz/users/0190d697-c83a-7376-8d15-0f77fd09e180", parsedSignatureHeader.KeyID.String()) + assert.Equal(t, "ed25519", parsedSignatureHeader.Algorithm) + assert.Equal(t, "(request-target) host date digest", parsedSignatureHeader.Headers) + assert.Equal(t, sigData.Sign(bobPriv), parsedSignatureHeader.Signature) + + v := Verifier{PublicKey: 
bobPub} + if !v.Verify(method, date.ToStd(), host, path, body, parsedSignatureHeader) { + t.Error("signature verification failed") + } + }) + + t.Run("signature header generation", func(t *testing.T) { + sig := Signer{PrivateKey: bobPriv, UserURL: &url.URL{Scheme: "https", Host: "lysand.i.devminer.xyz", Path: "/users/0190d697-c83a-7376-8d15-0f77fd09e180"}}. + Sign(*sigData) + assert.Equal(t, signatureHeader, sig.String()) + }) +} diff --git a/pkg/lysand/entity.go b/pkg/lysand/entity.go new file mode 100644 index 0000000..c0083c1 --- /dev/null +++ b/pkg/lysand/entity.go @@ -0,0 +1,43 @@ +package lysand + +import ( + "github.com/google/uuid" +) + +// Entity is the base type for all Lysand entities. For more information, see the [Spec]. +// +// [Spec]: https://lysand.org/objects#types +type Entity struct { + // Type is the type of the entity + Type string `json:"type"` + + // ID is a UUID for the entity + ID uuid.UUID `json:"id"` + + // URI is the URL to the entity + URI *URL `json:"uri"` + + // CreatedAt is the time the entity was created + CreatedAt Time `json:"created_at"` + + // Extensions is a map of active extensions + // https://lysand.org/objects/server-metadata#extensions + Extensions Extensions `json:"extensions,omitempty"` +} + +type Extensions map[string]any + +// { +// "org.lysand:custom_emojis": { +// "emojis": [ +// { +// "name": "neocat_3c", +// "url": { +// "image/webp": { +// "content": "https://cdn.lysand.org/a97727158bf062ad31cbfb02e212ce0c7eca599a2f863276511b8512270b25e8/neocat_3c_256.webp" +// } +// } +// } +// ] +// } +// } diff --git a/pkg/lysand/federation_client.go b/pkg/lysand/federation_client.go new file mode 100644 index 0000000..eb1a408 --- /dev/null +++ b/pkg/lysand/federation_client.go @@ -0,0 +1,123 @@ +package lysand + +import ( + "context" + "fmt" + "github.com/go-logr/logr" + "io" + "net/http" + "net/url" + "time" +) + +type ResponseError struct { + StatusCode int + URL *url.URL +} + +func (e *ResponseError) Error() string { + 
return fmt.Sprintf("error from %s: %d", e.URL, e.StatusCode) +} + +type FederationClient struct { + log logr.Logger + httpC *http.Client +} + +type Opt func(c *FederationClient) + +func WithHTTPClient(h *http.Client) Opt { + return func(c *FederationClient) { + c.httpC = h + } +} + +func WithLogger(l logr.Logger) Opt { + return func(c *FederationClient) { + c.log = l + } +} + +func NewClient(opts ...Opt) *FederationClient { + c := &FederationClient{ + httpC: http.DefaultClient, + log: logr.Discard(), + } + + for _, opt := range opts { + opt(c) + } + + c.httpC.Transport = &federationClientHTTPTransport{ + inner: c.httpC.Transport, + useragent: "github.com/thedevminertv/go-lysand#0.0.1", + } + + return c +} + +func (c *FederationClient) rawGET(ctx context.Context, uri *url.URL) (*http.Response, []byte, error) { + req, err := http.NewRequestWithContext(ctx, "GET", uri.String(), nil) + if err != nil { + return nil, nil, err + } + + return c.doReq(req) +} + +func (c *FederationClient) rawPOST(ctx context.Context, uri *url.URL, body io.Reader) (*http.Response, []byte, error) { + req, err := http.NewRequestWithContext(ctx, "POST", uri.String(), body) + if err != nil { + return nil, nil, err + } + + return c.doReq(req) +} + +func (c *FederationClient) doReq(req *http.Request) (*http.Response, []byte, error) { + resp, err := c.httpC.Do(req) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + if resp.StatusCode >= http.StatusBadRequest { + return resp, nil, &ResponseError{ + StatusCode: resp.StatusCode, + URL: req.URL, + } + } + + respBody, err := io.ReadAll(resp.Body) + if err != nil { + return resp, nil, err + } + + return resp, respBody, nil +} + +type federationClientHTTPTransport struct { + inner http.RoundTripper + useragent string + l logr.Logger +} + +func (t *federationClientHTTPTransport) RoundTrip(req *http.Request) (*http.Response, error) { + req.Header.Set("Accept", "application/json") + req.Header.Set("User-Agent", t.useragent) + + if 
req.Body != nil { + req.Header.Set("Content-Type", "application/json") + } + + start := time.Now() + res, err := t.inner.RoundTrip(req) + elapsed := time.Since(start) + if err == nil { + t.l.V(1).Info("fetch succeeded", "url", req.URL.String(), "status", res.StatusCode, "duration", elapsed) + } else { + t.l.V(1).Error(err, "fetch failed", "url", req.URL.String(), "duration", elapsed) + } + + return res, err +} diff --git a/pkg/lysand/inbox.go b/pkg/lysand/inbox.go new file mode 100644 index 0000000..ad8cba8 --- /dev/null +++ b/pkg/lysand/inbox.go @@ -0,0 +1,72 @@ +package lysand + +import ( + "encoding/json" + "fmt" +) + +type inboxObject struct { + Type string `json:"type"` +} + +func ParseInboxObject(raw json.RawMessage) (any, error) { + var i inboxObject + if err := json.Unmarshal(raw, &i); err != nil { + return nil, err + } + + switch i.Type { + case "Publication": + m := Publication{} + if err := json.Unmarshal(raw, &m); err != nil { + return nil, err + } + return m, nil + case "Note": + m := Note{} + if err := json.Unmarshal(raw, &m); err != nil { + return nil, err + } + return m, nil + case "Patch": + m := Patch{} + if err := json.Unmarshal(raw, &m); err != nil { + return nil, err + } + return m, nil + case "Follow": + m := Follow{} + if err := json.Unmarshal(raw, &m); err != nil { + return nil, err + } + return m, nil + case "FollowAccept": + m := FollowAccept{} + if err := json.Unmarshal(raw, &m); err != nil { + return nil, err + } + return m, nil + case "FollowReject": + m := FollowReject{} + if err := json.Unmarshal(raw, &m); err != nil { + return nil, err + } + return m, nil + case "Undo": + m := Undo{} + if err := json.Unmarshal(raw, &m); err != nil { + return nil, err + } + return m, nil + default: + return nil, ErrUnknownType{Type: i.Type} + } +} + +type ErrUnknownType struct { + Type string +} + +func (e ErrUnknownType) Error() string { + return fmt.Sprintf("unknown inbox object type: %s", e.Type) +} diff --git a/pkg/lysand/public_key.go 
b/pkg/lysand/public_key.go new file mode 100644 index 0000000..b61591a --- /dev/null +++ b/pkg/lysand/public_key.go @@ -0,0 +1,66 @@ +package lysand + +import ( + "crypto/ed25519" + "crypto/x509" + "encoding/base64" + "encoding/json" + "errors" +) + +var ( + ErrInvalidPublicKeyType = errors.New("invalid public key type") +) + +// PublicKey represents a public key for a user. For more information, see the [Spec]. +// +// [Spec]: https://lysand.org/security/keys#public-key-cryptography +type PublicKey struct { + PublicKey SPKIPublicKey `json:"public_key"` + Actor *URL `json:"actor"` +} + +// SPKIPublicKey is a type that represents a [ed25519.PublicKey] in the SPKI +// format. +type SPKIPublicKey ed25519.PublicKey + +// UnmarshalJSON decodes the public key from a base64 encoded string and then unmarshals it from the SPKI form. +func (k *SPKIPublicKey) UnmarshalJSON(raw []byte) error { + rawStr := "" + if err := json.Unmarshal(raw, &rawStr); err != nil { + return err + } + + raw, err := base64.StdEncoding.DecodeString(rawStr) + if err != nil { + return err + } + + parsed, err := x509.ParsePKIXPublicKey(raw) + if err != nil { + return err + } + + edKey, ok := parsed.(ed25519.PublicKey) + if !ok { + return ErrInvalidPublicKeyType + } + + *k = SPKIPublicKey(edKey) + + return nil +} + +// MarshalJSON marshals the SPKI-encoded public key to a base64 encoded string. 
+func (k SPKIPublicKey) MarshalJSON() ([]byte, error) { + raw, err := x509.MarshalPKIXPublicKey(ed25519.PublicKey(k)) + if err != nil { + return nil, err + } + + return json.Marshal(base64.StdEncoding.EncodeToString(raw)) +} + +func (k SPKIPublicKey) ToStd() ed25519.PublicKey { + return ed25519.PublicKey(k) +} diff --git a/pkg/lysand/public_key_test.go b/pkg/lysand/public_key_test.go new file mode 100644 index 0000000..7cdd525 --- /dev/null +++ b/pkg/lysand/public_key_test.go @@ -0,0 +1,33 @@ +package lysand + +import ( + "crypto/ed25519" + "crypto/x509" + "encoding/base64" + "encoding/json" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestSPKIPublicKey_UnmarshalJSON(t *testing.T) { + expectedPk := must(x509.ParsePKIXPublicKey, must(base64.StdEncoding.DecodeString, "MCowBQYDK2VwAyEAgKNt+9eyOXdb7MSrrmHlsFD2H9NGwC+56PjpWD46Tcs=")) + + pk := PublicKey{} + raw := []byte(`{"public_key":"MCowBQYDK2VwAyEAgKNt+9eyOXdb7MSrrmHlsFD2H9NGwC+56PjpWD46Tcs="}`) + if err := json.Unmarshal(raw, &pk); err != nil { + t.Error(err) + } + + assert.Equal(t, expectedPk, ed25519.PublicKey(pk.PublicKey)) +} + +func TestSPKIPublicKey_MarshalJSON(t *testing.T) { + expectedPk := must(x509.ParsePKIXPublicKey, must(base64.StdEncoding.DecodeString, "MCowBQYDK2VwAyEAgKNt+9eyOXdb7MSrrmHlsFD2H9NGwC+56PjpWD46Tcs=")).(ed25519.PublicKey) + + pk := PublicKey{ + PublicKey: SPKIPublicKey(expectedPk), + } + if _, err := json.Marshal(pk); err != nil { + t.Error(err) + } +} diff --git a/pkg/lysand/publication.go b/pkg/lysand/publication.go new file mode 100644 index 0000000..76bdb58 --- /dev/null +++ b/pkg/lysand/publication.go @@ -0,0 +1,118 @@ +package lysand + +// PublicationVisibility is the visibility of a publication. For more information, see the [Spec]. +// +// [Spec]: https://lysand.org/objects/publications#visibility +type PublicationVisibility string + +const ( + // PublicationVisiblePublic means that the publication is visible to everyone. 
+ PublicationVisiblePublic PublicationVisibility = "public" + // PublicationVisibleUnlisted means that the publication is visible everyone, but should not appear in public timelines and search results. + PublicationVisibleUnlisted PublicationVisibility = "unlisted" + // PublicationVisibleFollowers means that the publication is visible to followers only. + PublicationVisibleFollowers PublicationVisibility = "followers" + // PublicationVisibleDirect means that the publication is a direct message, and is visible only to the mentioned users. + PublicationVisibleDirect PublicationVisibility = "direct" +) + +// Publication is a publication object. For more information, see the [Spec]. +// +// [Spec]: https://lysand.org/objects/publications +type Publication struct { + Entity + + // Author is the URL to the user + // https://lysand.org/objects/publications#author + Author *URL `json:"author"` + + // Content is the content of the publication + // https://lysand.org/objects/publications#content + Content TextContentTypeMap `json:"content,omitempty"` + + // Category is the category of the publication + // https://lysand.org/objects/publications#category + Category *CategoryType `json:"category,omitempty"` + + // Device that created the publication + // https://lysand.org/objects/publications#device + Device *Device `json:"device,omitempty"` + + // Previews is a list of URLs to preview images + // https://lysand.org/objects/publications#previews + Previews []LinkPreview `json:"previews,omitempty"` + + // Group is the URL to a group + // https://lysand.org/objects/publications#group + Group *URL `json:"group,omitempty"` + + // Attachments is a list of attachment objects, keyed by their MIME type + // https://lysand.org/objects/publications#attachments + Attachments []ContentTypeMap[Attachment] `json:"attachments,omitempty"` + + // RepliesTo is the URL to the publication being replied to + // https://lysand.org/objects/publications#replies-to + RepliesTo *URL 
`json:"replies_to,omitempty"` + + // Quoting is the URL to the publication being quoted + // https://lysand.org/objects/publications#quotes + Quoting *URL `json:"quoting,omitempty"` + + // Mentions is a list of URLs to users + // https://lysand.org/objects/publications#mentionshttps://lysand.org/objects/publications#mentions + Mentions []URL `json:"mentions,omitempty"` + + // Subject is the subject of the publication + // https://lysand.org/objects/publications#subject + Subject *string `json:"subject,omitempty"` + + // IsSensitive is a boolean indicating whether the publication contains sensitive content + // https://lysand.org/objects/publications#is-sensitive + IsSensitive *bool `json:"is_sensitive,omitempty"` + + // Visibility is the visibility of the publication + // https://lysand.org/objects/publications#visibility + Visibility PublicationVisibility `json:"visibility"` +} + +// LinkPreview is a preview of a link. For more information, see the [Spec]. +// +// [Spec]: https://lysand.org/objects/publications#types +type LinkPreview struct { + Link URL `json:"link"` + Title string `json:"title"` + Description *string `json:"description"` + Image *URL `json:"image"` + Icon *URL `json:"icon"` +} + +// Device is the device that creates publications. For more information, see the [Spec]. +// +// [Spec]: https://lysand.org/objects/publications#types +type Device struct { + Name string `json:"name"` + Version string `json:"version,omitempty"` + URL *URL `json:"url,omitempty"` +} + +// CategoryType is the type of publication. For more information, see the [Spec]. 
+// +// [Spec]: https://lysand.org/objects/publications#types +type CategoryType string + +const ( + // CategoryMicroblog is similar to Twitter, Mastodon + CategoryMicroblog CategoryType = "microblog" + // CategoryForum is similar to Reddit + CategoryForum CategoryType = "forum" + // CategoryBlog is similar to Wordpress, WriteFreely + CategoryBlog CategoryType = "blog" + // CategoryImage is similar to Instagram + CategoryImage CategoryType = "image" + // CategoryVideo is similar to YouTube + CategoryVideo CategoryType = "video" + // CategoryAudio is similar to SoundCloud, Spotify + CategoryAudio CategoryType = "audio" + // CategoryMessaging is similar to Discord, Matrix, Signal + CategoryMessaging CategoryType = "messaging" +) diff --git a/pkg/lysand/publication_note.go b/pkg/lysand/publication_note.go new file mode 100644 index 0000000..621eb61 --- /dev/null +++ b/pkg/lysand/publication_note.go @@ -0,0 +1,12 @@ +package lysand + +import "encoding/json" + +type Note Publication + +func (n Note) MarshalJSON() ([]byte, error) { + type note Note + n2 := note(n) + n2.Type = "Note" + return json.Marshal(n2) +} diff --git a/pkg/lysand/publication_patch.go b/pkg/lysand/publication_patch.go new file mode 100644 index 0000000..a8ec0cb --- /dev/null +++ b/pkg/lysand/publication_patch.go @@ -0,0 +1,29 @@ +package lysand + +import ( + "encoding/json" + + "github.com/google/uuid" +) + +// Patch is a type that represents a modification to a note. For more information, see the [Spec]. +// +// [Spec]: https://lysand.org/objects/patch +type Patch struct { + Note + + // PatchedID is the ID of the publication that was patched. + // https://lysand.org/objects/patch#patched-id + PatchedID uuid.UUID `json:"patched_id"` + + // PatchedAt is the time that the publication was patched. 
+ // https://lysand.org/objects/patch#patched-at + PatchedAt Time `json:"patched_at"` +} + +func (p Patch) MarshalJSON() ([]byte, error) { + type patch Patch + p2 := patch(p) + p2.Type = "Patch" + return json.Marshal(p2) +} diff --git a/pkg/lysand/server_metadata.go b/pkg/lysand/server_metadata.go new file mode 100644 index 0000000..4fed4b5 --- /dev/null +++ b/pkg/lysand/server_metadata.go @@ -0,0 +1,64 @@ +package lysand + +import ( + "encoding/json" + + "github.com/Masterminds/semver" +) + +// ServerMetadata represents the metadata of a Lysand server. For more information, see the [Spec]. +// +// ! Unlike other objects, server metadata is not meant to be federated. +// +// [Spec]: https://lysand.org/objects/server-metadata +type ServerMetadata struct { + // Type is always "ServerMetadata" + // https://lysand.org/objects/server-metadata#type + Type string `json:"type"` + + // Extensions is a map of active extensions + // https://lysand.org/objects/server-metadata#extensions + Extensions Extensions `json:"extensions,omitempty"` + + // Name is the name of the server + // https://lysand.org/objects/server-metadata#name + Name string `json:"name"` + + // Version is the version of the server software + // https://lysand.org/objects/server-metadata#version + Version *semver.Version `json:"version"` + + // Description is a description of the server + // https://lysand.org/objects/server-metadata#description + Description *string `json:"description,omitempty"` + + // Website is the URL to the server's website + // https://lysand.org/objects/server-metadata#website + Website *URL `json:"website,omitempty"` + + // Moderators is a list of URLs to moderators + // https://lysand.org/objects/server-metadata#moderators + Moderators []*URL `json:"moderators,omitempty"` + + // Admins is a list of URLs to administrators + // https://lysand.org/objects/server-metadata#admins + Admins []*URL `json:"admins,omitempty"` + + // Logo is the URL to the server's logo + // 
https://lysand.org/objects/server-metadata#logo + Logo *ImageContentTypeMap `json:"logo,omitempty"` + + // Banner is the URL to the server's banner + // https://lysand.org/objects/server-metadata#banner + Banner *ImageContentTypeMap `json:"banner,omitempty"` + + // SupportedExtensions is a list of supported extensions + SupportedExtensions []string `json:"supported_extensions"` +} + +func (s ServerMetadata) MarshalJSON() ([]byte, error) { + type serverMetadata ServerMetadata + s2 := serverMetadata(s) + s2.Type = "ServerMetadata" + return json.Marshal(s2) +} diff --git a/pkg/lysand/signature.go b/pkg/lysand/signature.go new file mode 100644 index 0000000..bc408d1 --- /dev/null +++ b/pkg/lysand/signature.go @@ -0,0 +1,70 @@ +package lysand + +import ( + "crypto/ed25519" + "encoding/base64" + "fmt" + "net/url" + "strings" + "time" +) + +type SignatureData struct { + RequestMethod string + Date time.Time + Host string + Path string + Digest []byte +} + +func NewSignatureData(method string, date time.Time, host, path string, digest []byte) *SignatureData { + return &SignatureData{ + RequestMethod: method, + Date: date, + Host: host, + Path: path, + Digest: digest, + } +} + +func (s *SignatureData) String() string { + return strings.Join([]string{ + fmt.Sprintf("(request-target): %s %s", strings.ToLower(s.RequestMethod), s.Path), + fmt.Sprintf("host: %s", s.Host), + fmt.Sprintf("date: %s", TimeFromStd(s.Date).String()), + fmt.Sprintf("digest: SHA-256=%s", base64.StdEncoding.EncodeToString(s.Digest)), + "", + }, "\n") +} + +func (s *SignatureData) Validate(pubKey ed25519.PublicKey, signature []byte) bool { + return ed25519.Verify(pubKey, []byte(s.String()), signature) +} + +func (s *SignatureData) Sign(privKey ed25519.PrivateKey) []byte { + return ed25519.Sign(privKey, []byte(s.String())) +} + +type Signer struct { + PrivateKey ed25519.PrivateKey + UserURL *url.URL +} + +func (s Signer) Sign(signatureData SignatureData) *SignatureHeader { + return &SignatureHeader{ + 
KeyID: s.UserURL, + Algorithm: "ed25519", + Headers: "(request-target) host date digest", + Signature: signatureData.Sign(s.PrivateKey), + } +} + +type Verifier struct { + PublicKey ed25519.PublicKey +} + +func (v Verifier) Verify(method string, date time.Time, host, path string, body []byte, sigHeader *SignatureHeader) bool { + sigData := NewSignatureData(method, date, host, path, hashSHA256(body)) + + return sigData.Validate(v.PublicKey, sigHeader.Signature) +} diff --git a/pkg/lysand/signature_header.go b/pkg/lysand/signature_header.go new file mode 100644 index 0000000..729203c --- /dev/null +++ b/pkg/lysand/signature_header.go @@ -0,0 +1,66 @@ +package lysand + +import ( + "encoding/base64" + "errors" + "fmt" + "net/url" + "strings" +) + +var ( + ErrInvalidSignatureHeader = errors.New("invalid signature header") +) + +type SignatureHeader struct { + // URL to a user + KeyID *url.URL + Headers string + Algorithm string + Signature []byte +} + +func (s SignatureHeader) String() string { + return strings.Join([]string{ + fmt.Sprintf(`keyId="%s"`, s.KeyID.String()), + fmt.Sprintf(`algorithm="%s"`, s.Algorithm), + fmt.Sprintf(`headers="%s"`, s.Headers), + fmt.Sprintf(`signature="%s"`, base64.StdEncoding.EncodeToString(s.Signature)), + }, ",") +} + +// ParseSignatureHeader parses strings in the form of +// `keyId="",algorithm="ed25519",headers="(request-target) host date digest",signature=""` +func ParseSignatureHeader(raw string) (*SignatureHeader, error) { + parts := strings.Split(raw, ",") + if len(parts) != 4 { + return nil, ErrInvalidSignatureHeader + } + + sig := &SignatureHeader{} + + for _, part := range parts { + kv := strings.SplitN(part, "=", 2) + // A part without "=" makes SplitN return a single element; indexing kv[1] + // below would panic on attacker-controlled header input, so reject it here. + if len(kv) != 2 { + return nil, ErrInvalidSignatureHeader + } + kv[1] = strings.TrimPrefix(kv[1], "\"") + kv[1] = strings.TrimSuffix(kv[1], "\"") + + var err error + + switch kv[0] { + case "keyId": + sig.KeyID, err = url.Parse(kv[1]) + case "algorithm": + sig.Algorithm = kv[1] + case "headers": + sig.Headers = kv[1] + case "signature": + sig.Signature, err = 
base64.StdEncoding.DecodeString(kv[1]) + } + + if err != nil { + return nil, err + } + } + + return sig, nil +} diff --git a/pkg/lysand/signature_header_test.go b/pkg/lysand/signature_header_test.go new file mode 100644 index 0000000..a96d58e --- /dev/null +++ b/pkg/lysand/signature_header_test.go @@ -0,0 +1,41 @@ +package lysand + +import ( + "encoding/base64" + "github.com/stretchr/testify/assert" + "testing" + "time" +) + +func TestParseSignatureHeader(t *testing.T) { + data := `keyId="https://example.com/users/bob",algorithm="ed25519",headers="(request-target) host date digest",signature="PbVicu1spnATYUznWn6N5ebNUC+w94U9k6y4dncLsr6hNfUD8CLInbUSkgR3AZrCWEZ+Md2+Lch70ofiSqXgAQ=="` + expectedSignature := must(base64.StdEncoding.DecodeString, "PbVicu1spnATYUznWn6N5ebNUC+w94U9k6y4dncLsr6hNfUD8CLInbUSkgR3AZrCWEZ+Md2+Lch70ofiSqXgAQ==") + + sig, err := ParseSignatureHeader(data) + if err != nil { + t.Error(err) + } + + assert.Equal(t, "https://example.com/users/bob", sig.KeyID.String()) + assert.Equal(t, "ed25519", sig.Algorithm) + assert.Equal(t, "(request-target) host date digest", sig.Headers) + assert.Equal(t, expectedSignature, sig.Signature) +} + +func TestSignatureHeader_String(t *testing.T) { + one := SignatureData{ + RequestMethod: "POST", + Date: time.Date(1970, 1, 1, 0, 0, 0, 0, time.UTC), + Host: "example2.com", + Path: "/users/bob", + Digest: hashSHA256([]byte("hello")), + } + + expected := `(request-target): post /users/bob +host: example2.com +date: 1970-01-01T00:00:00.000Z +digest: SHA-256=LPJNul+wow4m6DsqxbninhsWHlwfp0JecwQzYpOLmCQ= +` + + assert.Equal(t, expected, one.String()) +} diff --git a/pkg/lysand/time.go b/pkg/lysand/time.go new file mode 100644 index 0000000..f682e52 --- /dev/null +++ b/pkg/lysand/time.go @@ -0,0 +1,57 @@ +package lysand + +import ( + "encoding/json" + "time" +) + +const ISO8601 = "2006-01-02T15:04:05.000Z" + +func ParseTime(s string) (Time, error) { + t, err := time.Parse(ISO8601, s) + return Time(t), err +} + +// Time is a 
type that represents a time in the ISO8601 format. +type Time time.Time + +// String returns the time in the ISO8601 format. +func (t Time) String() string { + return t.ToStd().Format(ISO8601) +} + +// UnmarshalJSON decodes the time from a string in the ISO8601 format. +func (t *Time) UnmarshalJSON(data []byte) error { + raw := "" + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + parsed, err := time.Parse(ISO8601, raw) + if err != nil { + return err + } + + *t = Time(parsed) + + return nil +} + +// MarshalJSON marshals the time to a string in the ISO8601 format. +func (t Time) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// ToStd converts the time to a [time.Time]. +func (t Time) ToStd() time.Time { + return time.Time(t) +} + +// TimeFromStd converts a [time.Time] to a Time. +func TimeFromStd(u time.Time) Time { + return Time(u) +} + +func TimeNow() Time { + return Time(time.Now()) +} diff --git a/pkg/lysand/url.go b/pkg/lysand/url.go new file mode 100644 index 0000000..ca395a2 --- /dev/null +++ b/pkg/lysand/url.go @@ -0,0 +1,54 @@ +package lysand + +import ( + "encoding/json" + "net/url" +) + +// URL is a type that represents a URL, represented by a string in JSON, instead of a JSON object. 
+type URL url.URL + +func (u *URL) ResolveReference(ref *url.URL) *URL { + return URLFromStd(u.ToStd().ResolveReference(ref)) +} + +func (u *URL) String() string { + return u.ToStd().String() +} + +func (u *URL) UnmarshalJSON(data []byte) error { + raw := "" + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + parsed, err := url.Parse(raw) + if err != nil { + return err + } + + *u = URL(*parsed) + + return nil +} + +func (u *URL) MarshalJSON() ([]byte, error) { + return json.Marshal(u.String()) +} + +func (u *URL) ToStd() *url.URL { + return (*url.URL)(u) +} + +func URLFromStd(u *url.URL) *URL { + return (*URL)(u) +} + +func ParseURL(raw string) (*URL, error) { + parsed, err := url.Parse(raw) + if err != nil { + return nil, err + } + + return URLFromStd(parsed), nil +} diff --git a/pkg/taskqueue/client.go b/pkg/taskqueue/client.go new file mode 100644 index 0000000..ad6c5d5 --- /dev/null +++ b/pkg/taskqueue/client.go @@ -0,0 +1,288 @@ +package taskqueue + +import ( + "context" + "encoding/json" + "errors" + "strings" + "sync" + "time" + + "git.devminer.xyz/devminer/unitel" + "github.com/go-logr/logr" + "github.com/google/uuid" + "github.com/nats-io/nats.go" + "github.com/nats-io/nats.go/jetstream" +) + +type taskWrapper struct { + Task Task `json:"task"` + EnqueuedAt time.Time `json:"enqueuedAt"` + TraceInfo map[string]string `json:"traceInfo"` +} + +func (c *Client) newTaskWrapper(ctx context.Context, task Task) taskWrapper { + traceInfo := make(map[string]string) + c.telemetry.InjectIntoMap(ctx, traceInfo) + + return taskWrapper{ + Task: task, + EnqueuedAt: time.Now(), + TraceInfo: traceInfo, + } +} + +type Task struct { + ID string + Type string + Payload json.RawMessage +} + +func NewTask(type_ string, payload any) (Task, error) { + id := uuid.New() + + d, err := json.Marshal(payload) + if err != nil { + return Task{}, err + } + + return Task{ + ID: id.String(), + Type: type_, + Payload: d, + }, nil +} + +type Handler func(ctx 
context.Context, task Task) error + +type Client struct { + name string + subject string + handlers map[string][]Handler + + nc *nats.Conn + js jetstream.JetStream + s jetstream.Stream + + stopCh chan struct{} + closeOnce func() + + telemetry *unitel.Telemetry + log logr.Logger +} + +func NewClient(ctx context.Context, name string, natsClient *nats.Conn, telemetry *unitel.Telemetry, log logr.Logger) (*Client, error) { + js, err := jetstream.New(natsClient) + if err != nil { + return nil, err + } + + s, err := js.CreateStream(ctx, jetstream.StreamConfig{ + Name: name, + Subjects: []string{name + ".*"}, + MaxConsumers: -1, + MaxMsgs: -1, + Discard: jetstream.DiscardOld, + MaxMsgsPerSubject: -1, + Storage: jetstream.FileStorage, + Compression: jetstream.S2Compression, + AllowDirect: true, + }) + if errors.Is(err, nats.ErrStreamNameAlreadyInUse) { + s, err = js.Stream(ctx, name) + if err != nil { + return nil, err + } + } else if err != nil { + return nil, err + } + + stopCh := make(chan struct{}) + + c := &Client{ + name: name, + subject: name + ".tasks", + + handlers: map[string][]Handler{}, + + stopCh: stopCh, + closeOnce: sync.OnceFunc(func() { + close(stopCh) + }), + + nc: natsClient, + js: js, + s: s, + + telemetry: telemetry, + log: log, + } + + return c, nil +} + +func (c *Client) Close() { + c.closeOnce() + c.nc.Close() +} + +func (c *Client) Submit(ctx context.Context, task Task) error { + s := c.telemetry.StartSpan(ctx, "queue.publish", "taskqueue/Client.Submit"). 
+ AddAttribute("messaging.destination.name", c.subject) + defer s.End() + ctx = s.Context() + + s.AddAttribute("jobID", task.ID) + + data, err := json.Marshal(c.newTaskWrapper(ctx, task)) + if err != nil { + return err + } + + s.AddAttribute("messaging.message.body.size", len(data)) + + msg, err := c.js.PublishMsg(ctx, &nats.Msg{Subject: c.subject, Data: data}) + if err != nil { + return err + } + c.log.V(1).Info("submitted task", "id", task.ID, "type", task.Type, "sequence", msg.Sequence) + + s.AddAttribute("messaging.message.id", msg.Sequence) + + return nil +} + +func (c *Client) RegisterHandler(type_ string, handler Handler) { + c.log.V(2).Info("registering handler", "type", type_) + + if _, ok := c.handlers[type_]; !ok { + c.handlers[type_] = []Handler{} + } + c.handlers[type_] = append(c.handlers[type_], handler) +} + +func (c *Client) Start(ctx context.Context) error { + c.log.Info("starting") + + sub, err := c.js.CreateConsumer(ctx, c.name, jetstream.ConsumerConfig{ + // TODO: set name properly + Name: "versia-go", + Durable: "versia-go", + DeliverPolicy: jetstream.DeliverAllPolicy, + ReplayPolicy: jetstream.ReplayInstantPolicy, + AckPolicy: jetstream.AckExplicitPolicy, + FilterSubject: c.subject, + MaxWaiting: 1, + MaxAckPending: 1, + HeadersOnly: false, + MemoryStorage: false, + }) + if err != nil { + return err + } + + m, err := sub.Messages(jetstream.PullMaxMessages(1)) + if err != nil { + return err + } + + go func() { + for { + msg, err := m.Next() + if err != nil { + if errors.Is(err, jetstream.ErrMsgIteratorClosed) { + c.log.Info("stopping") + return + } + + c.log.Error(err, "failed to get next message") + break + } + + if err := c.handleTask(ctx, msg); err != nil { + c.log.Error(err, "failed to handle task") + break + } + } + }() + go func() { + <-c.stopCh + m.Drain() + }() + + return nil +} + +func (c *Client) handleTask(ctx context.Context, msg jetstream.Msg) error { + msgMeta, err := msg.Metadata() + if err != nil { + return err + } + + data := 
msg.Data() + + var w taskWrapper + if err := json.Unmarshal(data, &w); err != nil { + if err := msg.Nak(); err != nil { + c.log.Error(err, "failed to nak message") + } + + return err + } + + s := c.telemetry.StartSpan( + context.Background(), + "queue.process", + "taskqueue/Client.handleTask", + c.telemetry.ContinueFromMap(w.TraceInfo), + ). + AddAttribute("messaging.destination.name", c.subject). + AddAttribute("messaging.message.id", msgMeta.Sequence.Stream). + AddAttribute("messaging.message.retry.count", msgMeta.NumDelivered). + AddAttribute("messaging.message.body.size", len(data)). + AddAttribute("messaging.message.receive.latency", time.Since(w.EnqueuedAt).Milliseconds()) + defer s.End() + ctx = s.Context() + + handlers, ok := c.handlers[w.Task.Type] + if !ok { + c.log.V(1).Info("no handler for task", "type", w.Task.Type) + return msg.Nak() + } + + var errs CombinedError + for _, handler := range handlers { + if err := handler(ctx, w.Task); err != nil { + c.log.Error(err, "handler failed", "type", w.Task.Type) + errs.Errors = append(errs.Errors, err) + } + } + + if len(errs.Errors) > 0 { + if err := msg.Nak(); err != nil { + c.log.Error(err, "failed to nak message") + errs.Errors = append(errs.Errors, err) + } + + return errs + } + + return msg.Ack() +} + +type CombinedError struct { + Errors []error +} + +func (e CombinedError) Error() string { + sb := strings.Builder{} + sb.WriteRune('[') + for i, err := range e.Errors { + if i > 0 { + sb.WriteRune(',') + } + sb.WriteString(err.Error()) + } + sb.WriteRune(']') + return sb.String() +} diff --git a/pkg/webfinger/host_meta.go b/pkg/webfinger/host_meta.go new file mode 100644 index 0000000..4518e47 --- /dev/null +++ b/pkg/webfinger/host_meta.go @@ -0,0 +1,51 @@ +package webfinger + +import ( + "encoding/json" + "net/url" +) + +type HostMeta struct { + JSON []byte + XML []byte +} + +func NewHostMeta(baseURL *url.URL) HostMeta { + // Keep the query in RawQuery: embedding "?resource={uri}" in Path makes + // URL.String() percent-escape it ("%3F", "%7Buri%7D"), producing a broken + // webfinger template after ResolveReference. + template := &url.URL{Path: "/.well-known/webfinger", RawQuery: "resource={uri}"} + template = 
baseURL.ResolveReference(template) + + return HostMeta{ + JSON: generateJSONHostMeta(template), + XML: generateXMLHostMeta(template), + } +} + +func generateXMLHostMeta(template *url.URL) []byte { + return []byte(` + + + `) +} + +func generateJSONHostMeta(template *url.URL) []byte { + b, err := json.Marshal(hostMetaStruct{ + Links: []hostMetaLink{{ + Rel: "lrdd", + Template: template.String(), + }}, + }) + if err != nil { + panic(err) + } + + return b +} + +type hostMetaStruct struct { + Links []hostMetaLink `json:"links"` +} + +type hostMetaLink struct { + Rel string `json:"rel"` + Template string `json:"template"` +} diff --git a/pkg/webfinger/webfinger.go b/pkg/webfinger/webfinger.go new file mode 100644 index 0000000..71410db --- /dev/null +++ b/pkg/webfinger/webfinger.go @@ -0,0 +1,72 @@ +package webfinger + +import ( + "errors" + "net/url" + "strings" +) + +var ( + ErrInvalidSyntax = errors.New("must follow the format \"acct:@\"") +) + +func ParseResource(res string) (*UserID, error) { + if !strings.HasPrefix(res, "acct:") { + return nil, ErrInvalidSyntax + } + + if !strings.Contains(res, "@") { + return nil, ErrInvalidSyntax + } + + spl := strings.Split(res, "@") + if len(spl) != 2 { + return nil, ErrInvalidSyntax + } + + userID := strings.TrimPrefix(spl[0], "acct:") + domain := spl[1] + + return &UserID{userID, domain}, nil +} + +type UserID struct { + ID string + Domain string +} + +func (u UserID) String() string { + return u.ID + "@" + u.Domain +} + +type Response struct { + Subject string `json:"subject,omitempty"` + Links []Link `json:"links,omitempty"` + + Error *string `json:"error,omitempty"` +} + +type Link struct { + Relation string `json:"rel"` + Type any `json:"type"` + Link string `json:"href"` +} + +type User struct { + UserID + + URI *url.URL + + Avatar *url.URL + AvatarMIMEType string +} + +func (u User) WebFingerResource() Response { + return Response{ + Subject: "acct:" + u.String(), + Links: []Link{ + {"self", "application/json", 
u.URI.String()}, + {"avatar", u.AvatarMIMEType, u.Avatar.String()}, + }, + } +} diff --git a/scripts/ssl-tunnel.sh b/scripts/ssl-tunnel.sh new file mode 100755 index 0000000..a896828 --- /dev/null +++ b/scripts/ssl-tunnel.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +sudo socat TCP-LISTEN:443,reuseaddr,fork TCP:localhost:8443 \ No newline at end of file diff --git a/web/.env b/web/.env new file mode 100644 index 0000000..4597403 --- /dev/null +++ b/web/.env @@ -0,0 +1,5 @@ +VITE_ENVIRONMENT=development +VITE_BASE_URL=https://localhost +VITE_SENTRY_DSN= +VITE_OTLP_ENDPOINT= +VITE_OTLP_TRACE_PROPAGATION=true diff --git a/web/.eslintrc.cjs b/web/.eslintrc.cjs new file mode 100644 index 0000000..b3dc3c6 --- /dev/null +++ b/web/.eslintrc.cjs @@ -0,0 +1,18 @@ +module.exports = { + root: true, + env: {browser: true, es2020: true}, + extends: [ + "eslint:recommended", + "plugin:@typescript-eslint/recommended", + "plugin:react-hooks/recommended", + ], + ignorePatterns: ["dist", ".eslintrc.cjs"], + parser: "@typescript-eslint/parser", + plugins: ["react-refresh"], + rules: { + "react-refresh/only-export-components": [ + "warn", + {allowConstantExport: true}, + ], + }, +}; diff --git a/web/.gitignore b/web/.gitignore new file mode 100644 index 0000000..a547bf3 --- /dev/null +++ b/web/.gitignore @@ -0,0 +1,24 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? diff --git a/web/index.html b/web/index.html new file mode 100644 index 0000000..f69a4c4 --- /dev/null +++ b/web/index.html @@ -0,0 +1,13 @@ + + + + + + + Vite + React + TS + + +
+ + + diff --git a/web/package.json b/web/package.json new file mode 100644 index 0000000..bb6561f --- /dev/null +++ b/web/package.json @@ -0,0 +1,46 @@ +{ + "name": "web", + "private": true, + "version": "0.0.0", + "type": "module", + "scripts": { + "dev": "vite", + "build": "tsc -b && vite build", + "lint": "eslint . --ext ts,tsx --report-unused-disable-directives --max-warnings 0", + "preview": "vite preview" + }, + "dependencies": { + "@effect/opentelemetry": "^0.35.0", + "@effect/platform": "^0.61.2", + "@effect/schema": "^0.70.1", + "@opentelemetry/exporter-trace-otlp-http": "^0.52.1", + "@opentelemetry/sdk-trace-base": "^1.25.1", + "@opentelemetry/sdk-trace-web": "^1.25.1", + "@sentry/react": "^8.22.0", + "@tanstack/react-query": "^5.51.21", + "@tanstack/react-router": "^1.45.11", + "effect": "^3.6.0", + "react": "^18.3.1", + "react-dom": "^18.3.1" + }, + "devDependencies": { + "@tanstack/react-query-devtools": "^5.51.21", + "@tanstack/router-devtools": "^1.45.11", + "@tanstack/router-plugin": "^1.45.8", + "@types/react": "^18.3.3", + "@types/react-dom": "^18.3.0", + "@typescript-eslint/eslint-plugin": "^7.15.0", + "@typescript-eslint/parser": "^7.15.0", + "@vitejs/plugin-react": "^4.3.1", + "autoprefixer": "^10.4.19", + "eslint": "^8.57.0", + "eslint-plugin-react-hooks": "^4.6.2", + "eslint-plugin-react-refresh": "^0.4.7", + "postcss": "^8.4.40", + "postcss-loader": "^8.1.1", + "tailwindcss": "^3.4.7", + "typescript": "^5.2.2", + "vite": "^5.3.4" + }, + "packageManager": "pnpm@9.6.0+sha512.38dc6fba8dba35b39340b9700112c2fe1e12f10b17134715a4aa98ccf7bb035e76fd981cf0bb384dfa98f8d6af5481c2bef2f4266a24bfa20c34eb7147ce0b5e" +} diff --git a/web/pnpm-lock.yaml b/web/pnpm-lock.yaml new file mode 100644 index 0000000..e02d4c8 --- /dev/null +++ b/web/pnpm-lock.yaml @@ -0,0 +1,3519 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + dependencies: + '@effect/opentelemetry': + specifier: ^0.35.0 + 
version: 0.35.0(@opentelemetry/api@1.9.0)(@opentelemetry/resources@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-node@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-web@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.25.1)(effect@3.6.0) + '@effect/platform': + specifier: ^0.61.2 + version: 0.61.2(@effect/schema@0.70.1(effect@3.6.0))(effect@3.6.0) + '@effect/schema': + specifier: ^0.70.1 + version: 0.70.1(effect@3.6.0) + '@opentelemetry/exporter-trace-otlp-http': + specifier: ^0.52.1 + version: 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': + specifier: ^1.25.1 + version: 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-web': + specifier: ^1.25.1 + version: 1.25.1(@opentelemetry/api@1.9.0) + '@sentry/react': + specifier: ^8.22.0 + version: 8.22.0(react@18.3.1) + '@tanstack/react-query': + specifier: ^5.51.21 + version: 5.51.21(react@18.3.1) + '@tanstack/react-router': + specifier: ^1.45.11 + version: 1.45.11(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + effect: + specifier: ^3.6.0 + version: 3.6.0 + react: + specifier: ^18.3.1 + version: 18.3.1 + react-dom: + specifier: ^18.3.1 + version: 18.3.1(react@18.3.1) + devDependencies: + '@tanstack/react-query-devtools': + specifier: ^5.51.21 + version: 5.51.21(@tanstack/react-query@5.51.21(react@18.3.1))(react@18.3.1) + '@tanstack/router-devtools': + specifier: ^1.45.11 + version: 1.45.11(@tanstack/react-router@1.45.11(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(csstype@3.1.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + '@tanstack/router-plugin': + specifier: ^1.45.8 + version: 1.45.8(vite@5.3.5(@types/node@22.1.0)) + '@types/react': + specifier: ^18.3.3 + version: 18.3.3 + '@types/react-dom': + specifier: ^18.3.0 + version: 18.3.0 + '@typescript-eslint/eslint-plugin': + specifier: ^7.15.0 + version: 
7.17.0(@typescript-eslint/parser@7.17.0(eslint@8.57.0)(typescript@5.5.4))(eslint@8.57.0)(typescript@5.5.4) + '@typescript-eslint/parser': + specifier: ^7.15.0 + version: 7.17.0(eslint@8.57.0)(typescript@5.5.4) + '@vitejs/plugin-react': + specifier: ^4.3.1 + version: 4.3.1(vite@5.3.5(@types/node@22.1.0)) + autoprefixer: + specifier: ^10.4.19 + version: 10.4.19(postcss@8.4.40) + eslint: + specifier: ^8.57.0 + version: 8.57.0 + eslint-plugin-react-hooks: + specifier: ^4.6.2 + version: 4.6.2(eslint@8.57.0) + eslint-plugin-react-refresh: + specifier: ^0.4.7 + version: 0.4.9(eslint@8.57.0) + postcss: + specifier: ^8.4.40 + version: 8.4.40 + postcss-loader: + specifier: ^8.1.1 + version: 8.1.1(postcss@8.4.40)(typescript@5.5.4) + tailwindcss: + specifier: ^3.4.7 + version: 3.4.7 + typescript: + specifier: ^5.2.2 + version: 5.5.4 + vite: + specifier: ^5.3.4 + version: 5.3.5(@types/node@22.1.0) + +packages: + + '@alloc/quick-lru@5.2.0': + resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==} + engines: {node: '>=10'} + + '@ampproject/remapping@2.3.0': + resolution: {integrity: sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw==} + engines: {node: '>=6.0.0'} + + '@babel/code-frame@7.24.7': + resolution: {integrity: sha512-BcYH1CVJBO9tvyIZ2jVeXgSIMvGZ2FDRvDdOIVQyuklNKSsx+eppDEBq/g47Ayw+RqNFE+URvOShmf+f/qwAlA==} + engines: {node: '>=6.9.0'} + + '@babel/compat-data@7.24.9': + resolution: {integrity: sha512-e701mcfApCJqMMueQI0Fb68Amflj83+dvAvHawoBpAz+GDjCIyGHzNwnefjsWJ3xiYAqqiQFoWbspGYBdb2/ng==} + engines: {node: '>=6.9.0'} + + '@babel/core@7.24.9': + resolution: {integrity: sha512-5e3FI4Q3M3Pbr21+5xJwCv6ZT6KmGkI0vw3Tozy5ODAQFTIWe37iT8Cr7Ice2Ntb+M3iSKCEWMB1MBgKrW3whg==} + engines: {node: '>=6.9.0'} + + '@babel/generator@7.24.10': + resolution: {integrity: sha512-o9HBZL1G2129luEUlG1hB4N/nlYNWHnpwlND9eOMclRqqu1YDy2sSYVCFUZwl8I1Gxh+QSRrP2vD7EpUmFVXxg==} + engines: {node: 
'>=6.9.0'} + + '@babel/helper-compilation-targets@7.24.8': + resolution: {integrity: sha512-oU+UoqCHdp+nWVDkpldqIQL/i/bvAv53tRqLG/s+cOXxe66zOYLU7ar/Xs3LdmBihrUMEUhwu6dMZwbNOYDwvw==} + engines: {node: '>=6.9.0'} + + '@babel/helper-environment-visitor@7.24.7': + resolution: {integrity: sha512-DoiN84+4Gnd0ncbBOM9AZENV4a5ZiL39HYMyZJGZ/AZEykHYdJw0wW3kdcsh9/Kn+BRXHLkkklZ51ecPKmI1CQ==} + engines: {node: '>=6.9.0'} + + '@babel/helper-function-name@7.24.7': + resolution: {integrity: sha512-FyoJTsj/PEUWu1/TYRiXTIHc8lbw+TDYkZuoE43opPS5TrI7MyONBE1oNvfguEXAD9yhQRrVBnXdXzSLQl9XnA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-hoist-variables@7.24.7': + resolution: {integrity: sha512-MJJwhkoGy5c4ehfoRyrJ/owKeMl19U54h27YYftT0o2teQ3FJ3nQUf/I3LlJsX4l3qlw7WRXUmiyajvHXoTubQ==} + engines: {node: '>=6.9.0'} + + '@babel/helper-module-imports@7.24.7': + resolution: {integrity: sha512-8AyH3C+74cgCVVXow/myrynrAGv+nTVg5vKu2nZph9x7RcRwzmh0VFallJuFTZ9mx6u4eSdXZfcOzSqTUm0HCA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-module-transforms@7.24.9': + resolution: {integrity: sha512-oYbh+rtFKj/HwBQkFlUzvcybzklmVdVV3UU+mN7n2t/q3yGHbuVdNxyFvSBO1tfvjyArpHNcWMAzsSPdyI46hw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0 + + '@babel/helper-plugin-utils@7.24.8': + resolution: {integrity: sha512-FFWx5142D8h2Mgr/iPVGH5G7w6jDn4jUSpZTyDnQO0Yn7Ks2Kuz6Pci8H6MPCoUJegd/UZQ3tAvfLCxQSnWWwg==} + engines: {node: '>=6.9.0'} + + '@babel/helper-simple-access@7.24.7': + resolution: {integrity: sha512-zBAIvbCMh5Ts+b86r/CjU+4XGYIs+R1j951gxI3KmmxBMhCg4oQMsv6ZXQ64XOm/cvzfU1FmoCyt6+owc5QMYg==} + engines: {node: '>=6.9.0'} + + '@babel/helper-split-export-declaration@7.24.7': + resolution: {integrity: sha512-oy5V7pD+UvfkEATUKvIjvIAH/xCzfsFVw7ygW2SI6NClZzquT+mwdTfgfdbUiceh6iQO0CHtCPsyze/MZ2YbAA==} + engines: {node: '>=6.9.0'} + + '@babel/helper-string-parser@7.24.8': + resolution: {integrity: sha512-pO9KhhRcuUyGnJWwyEgnRJTSIZHiT+vMD0kPeD+so0l7mxkMT19g3pjY9GTnHySck/hDzq+dtW/4VgnMkippsQ==} + 
engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.24.7': + resolution: {integrity: sha512-rR+PBcQ1SMQDDyF6X0wxtG8QyLCgUB0eRAGguqRLfkCA87l7yAP7ehq8SNj96OOGTO8OBV70KhuFYcIkHXOg0w==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-option@7.24.8': + resolution: {integrity: sha512-xb8t9tD1MHLungh/AIoWYN+gVHaB9kwlu8gffXGSt3FFEIT7RjS+xWbc2vUD1UTZdIpKj/ab3rdqJ7ufngyi2Q==} + engines: {node: '>=6.9.0'} + + '@babel/helpers@7.24.8': + resolution: {integrity: sha512-gV2265Nkcz7weJJfvDoAEVzC1e2OTDpkGbEsebse8koXUJUXPsCMi7sRo/+SPMuMZ9MtUPnGwITTnQnU5YjyaQ==} + engines: {node: '>=6.9.0'} + + '@babel/highlight@7.24.7': + resolution: {integrity: sha512-EStJpq4OuY8xYfhGVXngigBJRWxftKX9ksiGDnmlY3o7B/V7KIAc9X4oiK87uPJSc/vs5L869bem5fhZa8caZw==} + engines: {node: '>=6.9.0'} + + '@babel/parser@7.24.8': + resolution: {integrity: sha512-WzfbgXOkGzZiXXCqk43kKwZjzwx4oulxZi3nq2TYL9mOjQv6kYwul9mz6ID36njuL7Xkp6nJEfok848Zj10j/w==} + engines: {node: '>=6.0.0'} + hasBin: true + + '@babel/plugin-syntax-jsx@7.24.7': + resolution: {integrity: sha512-6ddciUPe/mpMnOKv/U+RSd2vvVy+Yw/JfBB0ZHYjEZt9NLHmCUylNYlsbqCCS1Bffjlb0fCwC9Vqz+sBz6PsiQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-syntax-typescript@7.24.7': + resolution: {integrity: sha512-c/+fVeJBB0FeKsFvwytYiUD+LBvhHjGSI0g446PRGdSVGZLRNArBUno2PETbAly3tpiNAQR5XaZ+JslxkotsbA==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-react-jsx-self@7.24.7': + resolution: {integrity: sha512-fOPQYbGSgH0HUp4UJO4sMBFjY6DuWq+2i8rixyUMb3CdGixs/gccURvYOAhajBdKDoGajFr3mUq5rH3phtkGzw==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/plugin-transform-react-jsx-source@7.24.7': + resolution: {integrity: sha512-J2z+MWzZHVOemyLweMqngXrgGC42jQ//R0KdxqkIz/OrbVIIlhFI3WigZ5fO+nwFvBlncr4MGapd8vTyc7RPNQ==} + engines: {node: '>=6.9.0'} + peerDependencies: + '@babel/core': ^7.0.0-0 + + '@babel/template@7.24.7': 
+ resolution: {integrity: sha512-jYqfPrU9JTF0PmPy1tLYHW4Mp4KlgxJD9l2nP9fD6yT/ICi554DmrWBAEYpIelzjHf1msDP3PxJIRt/nFNfBig==} + engines: {node: '>=6.9.0'} + + '@babel/traverse@7.24.8': + resolution: {integrity: sha512-t0P1xxAPzEDcEPmjprAQq19NWum4K0EQPjMwZQZbHt+GiZqvjCHjj755Weq1YRPVzBI+3zSfvScfpnuIecVFJQ==} + engines: {node: '>=6.9.0'} + + '@babel/types@7.24.9': + resolution: {integrity: sha512-xm8XrMKz0IlUdocVbYJe0Z9xEgidU7msskG8BbhnTPK/HZ2z/7FP7ykqPgrUH+C+r414mNfNWam1f2vqOjqjYQ==} + engines: {node: '>=6.9.0'} + + '@effect/opentelemetry@0.35.0': + resolution: {integrity: sha512-xnJJjtFbLPDO0o8/9MCYrN20pZCy4GwK1sOuHCvBkgag2m2cS80jCS4naIpZ9VoUmZJ3kklVdIurB+UJmKnmsw==} + peerDependencies: + '@opentelemetry/api': ^1.6 + '@opentelemetry/resources': ^1.22 + '@opentelemetry/sdk-metrics': ^1.22 + '@opentelemetry/sdk-trace-base': ^1.22 + '@opentelemetry/sdk-trace-node': ^1.22 + '@opentelemetry/sdk-trace-web': ^1.22 + '@opentelemetry/semantic-conventions': ^1.24.1 + effect: ^3.6.0 + peerDependenciesMeta: + '@opentelemetry/sdk-metrics': + optional: true + '@opentelemetry/sdk-trace-base': + optional: true + '@opentelemetry/sdk-trace-node': + optional: true + '@opentelemetry/sdk-trace-web': + optional: true + + '@effect/platform@0.61.2': + resolution: {integrity: sha512-cZdR+eNmaOS+YWpEoves5a+CecaW83EG9gcesEdthuXBt1mPUKNJhq5ky062uE+N/tktTvRQswVko/hAl27rkg==} + peerDependencies: + '@effect/schema': ^0.70.1 + effect: ^3.6.0 + + '@effect/schema@0.70.1': + resolution: {integrity: sha512-pxHAmOxWXUs7ZcHBTJUHcMg3ici65bOepxWV7cs//ujv1n9cLQJDQE8phUIzbxtROPcEbZwukGgJy7/HlBjx+w==} + peerDependencies: + effect: ^3.6.0 + + '@esbuild/aix-ppc64@0.21.5': + resolution: {integrity: sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.21.5': + resolution: {integrity: sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==} + engines: 
{node: '>=12'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.21.5': + resolution: {integrity: sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==} + engines: {node: '>=12'} + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.21.5': + resolution: {integrity: sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==} + engines: {node: '>=12'} + cpu: [x64] + os: [android] + + '@esbuild/darwin-arm64@0.21.5': + resolution: {integrity: sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.21.5': + resolution: {integrity: sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==} + engines: {node: '>=12'} + cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.21.5': + resolution: {integrity: sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==} + engines: {node: '>=12'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.21.5': + resolution: {integrity: sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.21.5': + resolution: {integrity: sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.21.5': + resolution: {integrity: sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.21.5': + resolution: {integrity: sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==} + engines: {node: '>=12'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.21.5': + 
resolution: {integrity: sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==} + engines: {node: '>=12'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.21.5': + resolution: {integrity: sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==} + engines: {node: '>=12'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.21.5': + resolution: {integrity: sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.21.5': + resolution: {integrity: sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==} + engines: {node: '>=12'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.21.5': + resolution: {integrity: sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==} + engines: {node: '>=12'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.21.5': + resolution: {integrity: sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-x64@0.21.5': + resolution: {integrity: sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==} + engines: {node: '>=12'} + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-x64@0.21.5': + resolution: {integrity: sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==} + engines: {node: '>=12'} + cpu: [x64] + os: [openbsd] + + '@esbuild/sunos-x64@0.21.5': + resolution: {integrity: sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==} + engines: {node: '>=12'} + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.21.5': + resolution: {integrity: 
sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.21.5': + resolution: {integrity: sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==} + engines: {node: '>=12'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.21.5': + resolution: {integrity: sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==} + engines: {node: '>=12'} + cpu: [x64] + os: [win32] + + '@eslint-community/eslint-utils@4.4.0': + resolution: {integrity: sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + + '@eslint-community/regexpp@4.11.0': + resolution: {integrity: sha512-G/M/tIiMrTAxEWRfLfQJMmGNX28IxBg4PBz8XqQhqUHLFI6TL2htpIB1iQCj144V5ee/JaKyT9/WZ0MGZWfA7A==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + + '@eslint/eslintrc@2.1.4': + resolution: {integrity: sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + '@eslint/js@8.57.0': + resolution: {integrity: sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + '@humanwhocodes/config-array@0.11.14': + resolution: {integrity: sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==} + engines: {node: '>=10.10.0'} + deprecated: Use @eslint/config-array instead + + '@humanwhocodes/module-importer@1.0.1': + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + + '@humanwhocodes/object-schema@2.0.3': + resolution: {integrity: 
sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==} + deprecated: Use @eslint/object-schema instead + + '@isaacs/cliui@8.0.2': + resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==} + engines: {node: '>=12'} + + '@jridgewell/gen-mapping@0.3.5': + resolution: {integrity: sha512-IzL8ZoEDIBRWEzlCcRhOaCupYyN5gdIK+Q6fbFdPDg6HqX6jpkItn7DFIpW9LQzXG6Df9sA7+OKnq0qlz/GaQg==} + engines: {node: '>=6.0.0'} + + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + '@jridgewell/set-array@1.2.1': + resolution: {integrity: sha512-R8gLRTZeyp03ymzP/6Lil/28tGeGEzhx1q2k703KGWRAI1VdvPIXdG70VJc2pAMw3NA6JKL5hhFu1sJX0Mnn/A==} + engines: {node: '>=6.0.0'} + + '@jridgewell/sourcemap-codec@1.5.0': + resolution: {integrity: sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==} + + '@jridgewell/trace-mapping@0.3.25': + resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==} + + '@nodelib/fs.scandir@2.1.5': + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: {node: '>= 8'} + + '@nodelib/fs.stat@2.0.5': + resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: '>= 8'} + + '@nodelib/fs.walk@1.2.8': + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: '>= 8'} + + '@opentelemetry/api-logs@0.52.1': + resolution: {integrity: sha512-qnSqB2DQ9TPP96dl8cDubDvrUyWc0/sK81xHTK8eSUspzDM3bsewX903qclQFvVhgStjRWdC5bLb3kQqMkfV5A==} + engines: {node: '>=14'} + + '@opentelemetry/api@1.9.0': + resolution: {integrity: 
sha512-3giAOQvZiH5F9bMlMiv8+GSPMeqg0dbaeo58/0SlA9sxSqZhnUtxzX9/2FzyhS9sWQf5S0GJE0AKBrFqjpeYcg==} + engines: {node: '>=8.0.0'} + + '@opentelemetry/context-async-hooks@1.25.1': + resolution: {integrity: sha512-UW/ge9zjvAEmRWVapOP0qyCvPulWU6cQxGxDbWEFfGOj1VBBZAuOqTo3X6yWmDTD3Xe15ysCZChHncr2xFMIfQ==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/core@1.25.1': + resolution: {integrity: sha512-GeT/l6rBYWVQ4XArluLVB6WWQ8flHbdb6r2FCHC3smtdOAbrJBIv35tpV/yp9bmYUJf+xmZpu9DRTIeJVhFbEQ==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/exporter-trace-otlp-http@0.52.1': + resolution: {integrity: sha512-05HcNizx0BxcFKKnS5rwOV+2GevLTVIRA0tRgWYyw4yCgR53Ic/xk83toYKts7kbzcI+dswInUg/4s8oyA+tqg==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': ^1.0.0 + + '@opentelemetry/otlp-exporter-base@0.52.1': + resolution: {integrity: sha512-z175NXOtX5ihdlshtYBe5RpGeBoTXVCKPPLiQlD6FHvpM4Ch+p2B0yWKYSrBfLH24H9zjJiBdTrtD+hLlfnXEQ==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': ^1.0.0 + + '@opentelemetry/otlp-transformer@0.52.1': + resolution: {integrity: sha512-I88uCZSZZtVa0XniRqQWKbjAUm73I8tpEy/uJYPPYw5d7BRdVk0RfTBQw8kSUl01oVWEuqxLDa802222MYyWHg==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/propagator-b3@1.25.1': + resolution: {integrity: sha512-p6HFscpjrv7//kE+7L+3Vn00VEDUJB0n6ZrjkTYHrJ58QZ8B3ajSJhRbCcY6guQ3PDjTbxWklyvIN2ojVbIb1A==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/propagator-jaeger@1.25.1': + resolution: {integrity: sha512-nBprRf0+jlgxks78G/xq72PipVK+4or9Ypntw0gVZYNTCSK8rg5SeaGV19tV920CMqBD/9UIOiFr23Li/Q8tiA==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/resources@1.25.1': + resolution: {integrity: 
sha512-pkZT+iFYIZsVn6+GzM0kSX+u3MSLCY9md+lIJOoKl/P+gJFfxJte/60Usdp8Ce4rOs8GduUpSPNe1ddGyDT1sQ==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/sdk-logs@0.52.1': + resolution: {integrity: sha512-MBYh+WcPPsN8YpRHRmK1Hsca9pVlyyKd4BxOC4SsgHACnl/bPp4Cri9hWhVm5+2tiQ9Zf4qSc1Jshw9tOLGWQA==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.4.0 <1.10.0' + + '@opentelemetry/sdk-metrics@1.25.1': + resolution: {integrity: sha512-9Mb7q5ioFL4E4dDrc4wC/A3NTHDat44v4I3p2pLPSxRvqUbDIQyMVr9uK+EU69+HWhlET1VaSrRzwdckWqY15Q==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.3.0 <1.10.0' + + '@opentelemetry/sdk-trace-base@1.25.1': + resolution: {integrity: sha512-C8k4hnEbc5FamuZQ92nTOp8X/diCY56XUTnMiv9UTuJitCzaNNHAVsdm5+HLCdI8SLQsLWIrG38tddMxLVoftw==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/sdk-trace-node@1.25.1': + resolution: {integrity: sha512-nMcjFIKxnFqoez4gUmihdBrbpsEnAX/Xj16sGvZm+guceYE0NE00vLhpDVK6f3q8Q4VFI5xG8JjlXKMB/SkTTQ==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/sdk-trace-web@1.25.1': + resolution: {integrity: sha512-SS6JaSkHngcBCNdWGthzcvaKGRnDw2AeP57HyTEileLToJ7WLMeV+064iRlVyoT4+e77MRp2T2dDSrmaUyxoNg==} + engines: {node: '>=14'} + peerDependencies: + '@opentelemetry/api': '>=1.0.0 <1.10.0' + + '@opentelemetry/semantic-conventions@1.25.1': + resolution: {integrity: sha512-ZDjMJJQRlyk8A1KZFCc+bCbsyrn1wTwdNt56F7twdfUfnHUZUq77/WfONCj8p72NZOyP7pNTdUWSTYC3GTbuuQ==} + engines: {node: '>=14'} + + '@pkgjs/parseargs@0.11.0': + resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} + engines: {node: '>=14'} + + '@protobufjs/aspromise@1.1.2': + resolution: {integrity: sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==} + + 
'@protobufjs/base64@1.1.2': + resolution: {integrity: sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==} + + '@protobufjs/codegen@2.0.4': + resolution: {integrity: sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==} + + '@protobufjs/eventemitter@1.1.0': + resolution: {integrity: sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==} + + '@protobufjs/fetch@1.1.0': + resolution: {integrity: sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==} + + '@protobufjs/float@1.0.2': + resolution: {integrity: sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==} + + '@protobufjs/inquire@1.1.0': + resolution: {integrity: sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==} + + '@protobufjs/path@1.1.2': + resolution: {integrity: sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==} + + '@protobufjs/pool@1.1.0': + resolution: {integrity: sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==} + + '@protobufjs/utf8@1.1.0': + resolution: {integrity: sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==} + + '@rollup/rollup-android-arm-eabi@4.19.0': + resolution: {integrity: sha512-JlPfZ/C7yn5S5p0yKk7uhHTTnFlvTgLetl2VxqE518QgyM7C9bSfFTYvB/Q/ftkq0RIPY4ySxTz+/wKJ/dXC0w==} + cpu: [arm] + os: [android] + + '@rollup/rollup-android-arm64@4.19.0': + resolution: {integrity: sha512-RDxUSY8D1tWYfn00DDi5myxKgOk6RvWPxhmWexcICt/MEC6yEMr4HNCu1sXXYLw8iAsg0D44NuU+qNq7zVWCrw==} + cpu: [arm64] + os: [android] + + '@rollup/rollup-darwin-arm64@4.19.0': + resolution: {integrity: sha512-emvKHL4B15x6nlNTBMtIaC9tLPRpeA5jMvRLXVbl/W9Ie7HhkrE7KQjvgS9uxgatL1HmHWDXk5TTS4IaNJxbAA==} + cpu: [arm64] + os: [darwin] + + 
'@rollup/rollup-darwin-x64@4.19.0': + resolution: {integrity: sha512-fO28cWA1dC57qCd+D0rfLC4VPbh6EOJXrreBmFLWPGI9dpMlER2YwSPZzSGfq11XgcEpPukPTfEVFtw2q2nYJg==} + cpu: [x64] + os: [darwin] + + '@rollup/rollup-linux-arm-gnueabihf@4.19.0': + resolution: {integrity: sha512-2Rn36Ubxdv32NUcfm0wB1tgKqkQuft00PtM23VqLuCUR4N5jcNWDoV5iBC9jeGdgS38WK66ElncprqgMUOyomw==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm-musleabihf@4.19.0': + resolution: {integrity: sha512-gJuzIVdq/X1ZA2bHeCGCISe0VWqCoNT8BvkQ+BfsixXwTOndhtLUpOg0A1Fcx/+eA6ei6rMBzlOz4JzmiDw7JQ==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm64-gnu@4.19.0': + resolution: {integrity: sha512-0EkX2HYPkSADo9cfeGFoQ7R0/wTKb7q6DdwI4Yn/ULFE1wuRRCHybxpl2goQrx4c/yzK3I8OlgtBu4xvted0ug==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-arm64-musl@4.19.0': + resolution: {integrity: sha512-GlIQRj9px52ISomIOEUq/IojLZqzkvRpdP3cLgIE1wUWaiU5Takwlzpz002q0Nxxr1y2ZgxC2obWxjr13lvxNQ==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-powerpc64le-gnu@4.19.0': + resolution: {integrity: sha512-N6cFJzssruDLUOKfEKeovCKiHcdwVYOT1Hs6dovDQ61+Y9n3Ek4zXvtghPPelt6U0AH4aDGnDLb83uiJMkWYzQ==} + cpu: [ppc64] + os: [linux] + + '@rollup/rollup-linux-riscv64-gnu@4.19.0': + resolution: {integrity: sha512-2DnD3mkS2uuam/alF+I7M84koGwvn3ZVD7uG+LEWpyzo/bq8+kKnus2EVCkcvh6PlNB8QPNFOz6fWd5N8o1CYg==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-s390x-gnu@4.19.0': + resolution: {integrity: sha512-D6pkaF7OpE7lzlTOFCB2m3Ngzu2ykw40Nka9WmKGUOTS3xcIieHe82slQlNq69sVB04ch73thKYIWz/Ian8DUA==} + cpu: [s390x] + os: [linux] + + '@rollup/rollup-linux-x64-gnu@4.19.0': + resolution: {integrity: sha512-HBndjQLP8OsdJNSxpNIN0einbDmRFg9+UQeZV1eiYupIRuZsDEoeGU43NQsS34Pp166DtwQOnpcbV/zQxM+rWA==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-linux-x64-musl@4.19.0': + resolution: {integrity: sha512-HxfbvfCKJe/RMYJJn0a12eiOI9OOtAUF4G6ozrFUK95BNyoJaSiBjIOHjZskTUffUrB84IPKkFG9H9nEvJGW6A==} + cpu: [x64] + os: [linux] + + 
'@rollup/rollup-win32-arm64-msvc@4.19.0': + resolution: {integrity: sha512-HxDMKIhmcguGTiP5TsLNolwBUK3nGGUEoV/BO9ldUBoMLBssvh4J0X8pf11i1fTV7WShWItB1bKAKjX4RQeYmg==} + cpu: [arm64] + os: [win32] + + '@rollup/rollup-win32-ia32-msvc@4.19.0': + resolution: {integrity: sha512-xItlIAZZaiG/u0wooGzRsx11rokP4qyc/79LkAOdznGRAbOFc+SfEdfUOszG1odsHNgwippUJavag/+W/Etc6Q==} + cpu: [ia32] + os: [win32] + + '@rollup/rollup-win32-x64-msvc@4.19.0': + resolution: {integrity: sha512-xNo5fV5ycvCCKqiZcpB65VMR11NJB+StnxHz20jdqRAktfdfzhgjTiJ2doTDQE/7dqGaV5I7ZGqKpgph6lCIag==} + cpu: [x64] + os: [win32] + + '@sentry-internal/browser-utils@8.22.0': + resolution: {integrity: sha512-R0u8KPaSivueIwUOhmYxcisKaJq3gx+I0xOcWoluDB3OI1Ds/QOSP/vmTsMg/mjwG/nUJ8RRM8pj0s8vlqCrjg==} + engines: {node: '>=14.18'} + + '@sentry-internal/feedback@8.22.0': + resolution: {integrity: sha512-Sy2+v0xBmVnZ5LQ48603CvLy5vVQvAZ+hc9xQSAHexts07NkvApMU1qv26YNwxlAWfDha1wXiW6ryd4YDzaoVA==} + engines: {node: '>=14.18'} + + '@sentry-internal/replay-canvas@8.22.0': + resolution: {integrity: sha512-/gV8qN3JqWw0LXTMuCGB8RDI8Bx1VESNRBdh/7Cmc5+hxYBfcketuix3S8mHWcE/JO+Ed9g1Abzys6GphTB9LA==} + engines: {node: '>=14.18'} + + '@sentry-internal/replay@8.22.0': + resolution: {integrity: sha512-sF8RyMPJP1fSIyyBDAbtybvKCu0dy8ZAfMwLP7ZqEnWrhZqktVuqM7/++EAFMlD5YaWJXm1IDuOXjgSQjUtSIQ==} + engines: {node: '>=14.18'} + + '@sentry/browser@8.22.0': + resolution: {integrity: sha512-t3b+/9WWcP9SQTWwrHrB57B33ENgmUjyFlW2+JSlCXkSJBSmAoquPZ/GPjOuPaSr3HIA0mu9uEr4A41d5diASQ==} + engines: {node: '>=14.18'} + + '@sentry/core@8.22.0': + resolution: {integrity: sha512-fYPnxp7UkY2tckaOtivIySxnJvlbekuxs+Qi6rkUv9JpF+TYKpt7OPNUAbgVIhS0xazAEN6iKTfmnmpUbFRLmQ==} + engines: {node: '>=14.18'} + + '@sentry/react@8.22.0': + resolution: {integrity: sha512-LcO8SPfjYsx3Zvg1mQwjreVvtriVxde+6njIJyXU9TArB0e8bFexvd4MGXdBExgW9aY449hNaStgKRWMNHeVHQ==} + engines: {node: '>=14.18'} + peerDependencies: + react: ^16.14.0 || 17.x || 18.x || 19.x + + '@sentry/types@8.22.0': + 
resolution: {integrity: sha512-1MLK3xO+uF2oJaa+M98aLIrQsEHzV7xnVWPfE3MhejYLNQebj4rQnQKTut/xZNIF9W0Q+bRcakLarC3ce2a74g==} + engines: {node: '>=14.18'} + + '@sentry/utils@8.22.0': + resolution: {integrity: sha512-0ITG2+3EtyMtyc/nQG8aB9z9eIQ4L43nM/KuNgYSnM1vPl/zegbaLT0Ek/xkQB1OLIOLkEPQ6x9GWe+248/n3g==} + engines: {node: '>=14.18'} + + '@tanstack/history@1.45.3': + resolution: {integrity: sha512-n4XXInV9irIq0obRvINIkESkGk280Q+xkIIbswmM0z9nAu2wsIRZNvlmPrtYh6bgNWtItOWWoihFUjLTW8g6Jg==} + engines: {node: '>=12'} + + '@tanstack/query-core@5.51.21': + resolution: {integrity: sha512-POQxm42IUp6n89kKWF4IZi18v3fxQWFRolvBA6phNVmA8psdfB1MvDnGacCJdS+EOX12w/CyHM62z//rHmYmvw==} + + '@tanstack/query-devtools@5.51.16': + resolution: {integrity: sha512-ajwuq4WnkNCMj/Hy3KR8d3RtZ6PSKc1dD2vs2T408MdjgKzQ3klVoL6zDgVO7X+5jlb5zfgcO3thh4ojPhfIaw==} + + '@tanstack/react-query-devtools@5.51.21': + resolution: {integrity: sha512-mi5ef8dvsS48GsG6/8M60O2EgrzPK1kNPngOcHBTlIUrB5dGkxP9fuHf05GQRxtSp5W5GlyeUpzOmtkKNpf9dQ==} + peerDependencies: + '@tanstack/react-query': ^5.51.21 + react: ^18 || ^19 + + '@tanstack/react-query@5.51.21': + resolution: {integrity: sha512-Q/V81x3sAYgCsxjwOkfLXfrmoG+FmDhLeHH5okC/Bp8Aaw2c33lbEo/mMcMnkxUPVtB2FLpzHT0tq3c+OlZEbw==} + peerDependencies: + react: ^18.0.0 + + '@tanstack/react-router@1.45.11': + resolution: {integrity: sha512-unVxPYsOwwMnyAW/ZeOCymWpstEV0tiUexUPqiCuYwb2rCwsrmjsgB4tIpVONHabPlFKrxoMbkUh5UCSpL0h+w==} + engines: {node: '>=12'} + peerDependencies: + react: '>=18' + react-dom: '>=18' + + '@tanstack/react-store@0.5.5': + resolution: {integrity: sha512-1orYXGatBqXCYKuroFwV8Ll/6aDa5E3pU6RR4h7RvRk7TmxF1+zLCsWALZaeijXkySNMGmvawSbUXRypivg2XA==} + peerDependencies: + react: ^17.0.0 || ^18.0.0 + react-dom: ^17.0.0 || ^18.0.0 + + '@tanstack/router-devtools@1.45.11': + resolution: {integrity: sha512-Na6L1lU00m18eqnWd4/Ex51LnCPGbsDpNNRu551tv3hlrtX9at4eJeI0ZOhv9IAKI2Ky3cW4OhvvHrtTalLCJg==} + engines: {node: '>=12'} + peerDependencies: + '@tanstack/react-router': 
^1.45.11 + react: '>=18' + react-dom: '>=18' + + '@tanstack/router-generator@1.45.7': + resolution: {integrity: sha512-5B756YXpZO0/yr7ahsxXoBOCpqroLx/D3l6X9qPlZaP0FVHVmTR6ZKGRX6zzuAxns/VO+sxnQso4AYLdDyZ9GA==} + engines: {node: '>=12'} + + '@tanstack/router-plugin@1.45.8': + resolution: {integrity: sha512-mncDu49pBEivRHWVOdCl9fHwtyUT3QLZ0X+gYIsm7o6LhT6i7BzC++BZqOOSrjDcyXy3dx2Nf/FMrB06qlXx9Q==} + engines: {node: '>=12'} + peerDependencies: + '@rsbuild/core': '>=0.7.9' + vite: '>=5.0.13' + webpack: '>=5.92.0' + peerDependenciesMeta: + '@rsbuild/core': + optional: true + vite: + optional: true + webpack: + optional: true + + '@tanstack/store@0.5.5': + resolution: {integrity: sha512-EOSrgdDAJExbvRZEQ/Xhh9iZchXpMN+ga1Bnk8Nmygzs8TfiE6hbzThF+Pr2G19uHL6+DTDTHhJ8VQiOd7l4tA==} + + '@types/babel__core@7.20.5': + resolution: {integrity: sha512-qoQprZvz5wQFJwMDqeseRXWv3rqMvhgpbXFfVyWhbx9X47POIA6i/+dXefEmZKoAgOaTdaIgNSMqMIU61yRyzA==} + + '@types/babel__generator@7.6.8': + resolution: {integrity: sha512-ASsj+tpEDsEiFr1arWrlN6V3mdfjRMZt6LtK/Vp/kreFLnr5QH5+DhvD5nINYZXzwJvXeGq+05iUXcAzVrqWtw==} + + '@types/babel__template@7.4.4': + resolution: {integrity: sha512-h/NUaSyG5EyxBIp8YRxo4RMe2/qQgvyowRwVMzhYhBCONbW8PUsg4lkFMrhgZhUe5z3L3MiLDuvyJ/CaPa2A8A==} + + '@types/babel__traverse@7.20.6': + resolution: {integrity: sha512-r1bzfrm0tomOI8g1SzvCaQHo6Lcv6zu0EA+W2kHrt8dyrHQxGzBBL4kdkzIS+jBMV+EYcMAEAqXqYaLJq5rOZg==} + + '@types/estree@1.0.5': + resolution: {integrity: sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==} + + '@types/node@22.1.0': + resolution: {integrity: sha512-AOmuRF0R2/5j1knA3c6G3HOk523Ga+l+ZXltX8SF1+5oqcXijjfTd8fY3XRZqSihEu9XhtQnKYLmkFaoxgsJHw==} + + '@types/prop-types@15.7.12': + resolution: {integrity: sha512-5zvhXYtRNRluoE/jAp4GVsSduVUzNWKkOZrCDBWYtE7biZywwdC2AcEzg+cSMLFRfVgeAFqpfNabiPjxFddV1Q==} + + '@types/react-dom@18.3.0': + resolution: {integrity: 
sha512-EhwApuTmMBmXuFOikhQLIBUn6uFg81SwLMOAUgodJF14SOBOCMdU04gDoYi0WOJJHD144TL32z4yDqCW3dnkQg==} + + '@types/react@18.3.3': + resolution: {integrity: sha512-hti/R0pS0q1/xx+TsI73XIqk26eBsISZ2R0wUijXIngRK9R/e7Xw/cXVxQK7R5JjW+SV4zGcn5hXjudkN/pLIw==} + + '@typescript-eslint/eslint-plugin@7.17.0': + resolution: {integrity: sha512-pyiDhEuLM3PuANxH7uNYan1AaFs5XE0zw1hq69JBvGvE7gSuEoQl1ydtEe/XQeoC3GQxLXyOVa5kNOATgM638A==} + engines: {node: ^18.18.0 || >=20.0.0} + peerDependencies: + '@typescript-eslint/parser': ^7.0.0 + eslint: ^8.56.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/parser@7.17.0': + resolution: {integrity: sha512-puiYfGeg5Ydop8eusb/Hy1k7QmOU6X3nvsqCgzrB2K4qMavK//21+PzNE8qeECgNOIoertJPUC1SpegHDI515A==} + engines: {node: ^18.18.0 || >=20.0.0} + peerDependencies: + eslint: ^8.56.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/scope-manager@7.17.0': + resolution: {integrity: sha512-0P2jTTqyxWp9HiKLu/Vemr2Rg1Xb5B7uHItdVZ6iAenXmPo4SZ86yOPCJwMqpCyaMiEHTNqizHfsbmCFT1x9SA==} + engines: {node: ^18.18.0 || >=20.0.0} + + '@typescript-eslint/type-utils@7.17.0': + resolution: {integrity: sha512-XD3aaBt+orgkM/7Cei0XNEm1vwUxQ958AOLALzPlbPqb8C1G8PZK85tND7Jpe69Wualri81PLU+Zc48GVKIMMA==} + engines: {node: ^18.18.0 || >=20.0.0} + peerDependencies: + eslint: ^8.56.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/types@7.17.0': + resolution: {integrity: sha512-a29Ir0EbyKTKHnZWbNsrc/gqfIBqYPwj3F2M+jWE/9bqfEHg0AMtXzkbUkOG6QgEScxh2+Pz9OXe11jHDnHR7A==} + engines: {node: ^18.18.0 || >=20.0.0} + + '@typescript-eslint/typescript-estree@7.17.0': + resolution: {integrity: sha512-72I3TGq93t2GoSBWI093wmKo0n6/b7O4j9o8U+f65TVD0FS6bI2180X5eGEr8MA8PhKMvYe9myZJquUT2JkCZw==} + engines: {node: ^18.18.0 || >=20.0.0} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + + '@typescript-eslint/utils@7.17.0': + 
resolution: {integrity: sha512-r+JFlm5NdB+JXc7aWWZ3fKSm1gn0pkswEwIYsrGPdsT2GjsRATAKXiNtp3vgAAO1xZhX8alIOEQnNMl3kbTgJw==} + engines: {node: ^18.18.0 || >=20.0.0} + peerDependencies: + eslint: ^8.56.0 + + '@typescript-eslint/visitor-keys@7.17.0': + resolution: {integrity: sha512-RVGC9UhPOCsfCdI9pU++K4nD7to+jTcMIbXTSOcrLqUEW6gF2pU1UUbYJKc9cvcRSK1UDeMJ7pdMxf4bhMpV/A==} + engines: {node: ^18.18.0 || >=20.0.0} + + '@ungap/structured-clone@1.2.0': + resolution: {integrity: sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==} + + '@vitejs/plugin-react@4.3.1': + resolution: {integrity: sha512-m/V2syj5CuVnaxcUJOQRel/Wr31FFXRFlnOoq1TVtkCxsY5veGMTEmpWHndrhB2U8ScHtCQB1e+4hWYExQc6Lg==} + engines: {node: ^14.18.0 || >=16.0.0} + peerDependencies: + vite: ^4.2.0 || ^5.0.0 + + acorn-jsx@5.3.2: + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + + acorn@8.12.1: + resolution: {integrity: sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==} + engines: {node: '>=0.4.0'} + hasBin: true + + ajv@6.12.6: + resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-regex@6.0.1: + resolution: {integrity: sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==} + engines: {node: '>=12'} + + ansi-styles@3.2.1: + resolution: {integrity: sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==} + engines: {node: '>=4'} + + ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + 
engines: {node: '>=8'} + + ansi-styles@6.2.1: + resolution: {integrity: sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==} + engines: {node: '>=12'} + + any-promise@1.3.0: + resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==} + + anymatch@3.1.3: + resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} + engines: {node: '>= 8'} + + arg@5.0.2: + resolution: {integrity: sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==} + + argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + array-union@2.1.0: + resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==} + engines: {node: '>=8'} + + autoprefixer@10.4.19: + resolution: {integrity: sha512-BaENR2+zBZ8xXhM4pUaKUxlVdxZ0EZhjvbopwnXmxRUfqDmwSpC2lAi/QXvx7NRdPCo1WKEcEF6mV64si1z4Ew==} + engines: {node: ^10 || ^12 || >=14} + hasBin: true + peerDependencies: + postcss: ^8.1.0 + + babel-dead-code-elimination@1.0.6: + resolution: {integrity: sha512-JxFi9qyRJpN0LjEbbjbN8g0ux71Qppn9R8Qe3k6QzHg2CaKsbUQtbn307LQGiDLGjV6JCtEFqfxzVig9MyDCHQ==} + + balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + binary-extensions@2.3.0: + resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==} + engines: {node: '>=8'} + + brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + + brace-expansion@2.0.1: + resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==} + + 
braces@3.0.3: + resolution: {integrity: sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==} + engines: {node: '>=8'} + + browserslist@4.23.2: + resolution: {integrity: sha512-qkqSyistMYdxAcw+CzbZwlBy8AGmS/eEWs+sEV5TnLRGDOL+C5M2EnH6tlZyg0YoAxGJAFKh61En9BR941GnHA==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + + callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + + camelcase-css@2.0.1: + resolution: {integrity: sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==} + engines: {node: '>= 6'} + + caniuse-lite@1.0.30001643: + resolution: {integrity: sha512-ERgWGNleEilSrHM6iUz/zJNSQTP8Mr21wDWpdgvRwcTXGAq6jMtOUPP4dqFPTdKqZ2wKTdtB+uucZ3MRpAUSmg==} + + chalk@2.4.2: + resolution: {integrity: sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==} + engines: {node: '>=4'} + + chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + + chokidar@3.6.0: + resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==} + engines: {node: '>= 8.10.0'} + + clsx@2.1.1: + resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==} + engines: {node: '>=6'} + + color-convert@1.9.3: + resolution: {integrity: sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==} + + color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-name@1.1.3: + resolution: {integrity: 
sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==} + + color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + commander@4.1.1: + resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} + engines: {node: '>= 6'} + + concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + + convert-source-map@2.0.0: + resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==} + + cosmiconfig@9.0.0: + resolution: {integrity: sha512-itvL5h8RETACmOTFc4UfIyB2RfEHi71Ax6E/PivVxq9NseKbOWpeyHEOIbmAw1rs8Ak0VursQNww7lf7YtUwzg==} + engines: {node: '>=14'} + peerDependencies: + typescript: '>=4.9.5' + peerDependenciesMeta: + typescript: + optional: true + + cross-spawn@7.0.3: + resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==} + engines: {node: '>= 8'} + + cssesc@3.0.0: + resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} + engines: {node: '>=4'} + hasBin: true + + csstype@3.1.3: + resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==} + + debug@4.3.5: + resolution: {integrity: sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + + didyoumean@1.2.2: + resolution: {integrity: 
sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==} + + dir-glob@3.0.1: + resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} + engines: {node: '>=8'} + + dlv@1.1.3: + resolution: {integrity: sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==} + + doctrine@3.0.0: + resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} + engines: {node: '>=6.0.0'} + + eastasianwidth@0.2.0: + resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==} + + effect@3.6.0: + resolution: {integrity: sha512-7bUV/Ytt058F3YWZJyra63MGA80msTk4AnFiVlvCM65RN7afc3LsvLOHpMifLqX/8CG0eDlJLg2lDLByyEnYGQ==} + + electron-to-chromium@1.5.2: + resolution: {integrity: sha512-kc4r3U3V3WLaaZqThjYz/Y6z8tJe+7K0bbjUVo3i+LWIypVdMx5nXCkwRe6SWbY6ILqLdc1rKcKmr3HoH7wjSQ==} + + emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + + emoji-regex@9.2.2: + resolution: {integrity: sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==} + + env-paths@2.2.1: + resolution: {integrity: sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==} + engines: {node: '>=6'} + + error-ex@1.3.2: + resolution: {integrity: sha512-7dFHNmqeFSEt2ZBsCriorKnn3Z2pj+fd9kmI6QoWw4//DL+icEBfc0U7qJCisqrTsKTjw4fNFy2pW9OqStD84g==} + + esbuild@0.21.5: + resolution: {integrity: sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==} + engines: {node: '>=12'} + hasBin: true + + escalade@3.1.2: + resolution: {integrity: sha512-ErCHMCae19vR8vQGe50xIsVomy19rg6gFu3+r3jkEO46suLMWBksvVyoGgQV+jOfl84ZSOSlmv6Gxa89PmTGmA==} + engines: {node: '>=6'} + + 
escape-string-regexp@1.0.5: + resolution: {integrity: sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==} + engines: {node: '>=0.8.0'} + + escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + + eslint-plugin-react-hooks@4.6.2: + resolution: {integrity: sha512-QzliNJq4GinDBcD8gPB5v0wh6g8q3SUi6EFF0x8N/BL9PoVs0atuGc47ozMRyOWAKdwaZ5OnbOEa3WR+dSGKuQ==} + engines: {node: '>=10'} + peerDependencies: + eslint: ^3.0.0 || ^4.0.0 || ^5.0.0 || ^6.0.0 || ^7.0.0 || ^8.0.0-0 + + eslint-plugin-react-refresh@0.4.9: + resolution: {integrity: sha512-QK49YrBAo5CLNLseZ7sZgvgTy21E6NEw22eZqc4teZfH8pxV3yXc9XXOYfUI6JNpw7mfHNkAeWtBxrTyykB6HA==} + peerDependencies: + eslint: '>=7' + + eslint-scope@7.2.2: + resolution: {integrity: sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint-visitor-keys@3.4.3: + resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + eslint@8.57.0: + resolution: {integrity: sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + hasBin: true + + espree@9.6.1: + resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + esquery@1.6.0: + resolution: {integrity: sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==} + engines: {node: '>=0.10'} + + esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + + 
estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + + esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + + fast-check@3.20.0: + resolution: {integrity: sha512-pZIjqLpOZgdSLecec4GKC3Zq5702MZ34upMKxojnNVSWA0K64V3pXOBT1Wdsrc3AphLtzRBbsi8bRWF4TUGmUg==} + engines: {node: '>=8.0.0'} + + fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + + fast-glob@3.3.2: + resolution: {integrity: sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==} + engines: {node: '>=8.6.0'} + + fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + + fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + + fastq@1.17.1: + resolution: {integrity: sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==} + + file-entry-cache@6.0.1: + resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} + engines: {node: ^10.12.0 || >=12.0.0} + + fill-range@7.1.1: + resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} + engines: {node: '>=8'} + + find-my-way-ts@0.1.5: + resolution: {integrity: sha512-4GOTMrpGQVzsCH2ruUn2vmwzV/02zF4q+ybhCIrw/Rkt3L8KWcycdC6aJMctJzwN4fXD4SD5F/4B9Sksh5rE0A==} + + find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + + flat-cache@3.2.0: + 
resolution: {integrity: sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==} + engines: {node: ^10.12.0 || >=12.0.0} + + flatted@3.3.1: + resolution: {integrity: sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==} + + foreground-child@3.2.1: + resolution: {integrity: sha512-PXUUyLqrR2XCWICfv6ukppP96sdFwWbNEnfEMt7jNsISjMsvaLNinAHNDYyvkyU+SZG2BTSbT5NjG+vZslfGTA==} + engines: {node: '>=14'} + + fraction.js@4.3.7: + resolution: {integrity: sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==} + + fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + + gensync@1.0.0-beta.2: + resolution: {integrity: sha512-3hN7NaskYvMDLQY55gnW3NQ+mesEAepTqlg+VEbj7zzqEMBVNhzcGYYeqFo/TlYz6eQiFcp1HcsCZO+nGgS8zg==} + engines: {node: '>=6.9.0'} + + glob-parent@5.1.2: + resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} + engines: {node: '>= 6'} + + glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + + glob@10.4.5: + resolution: {integrity: sha512-7Bv8RF0k6xjo7d4A/PxYLbUCfb6c+Vpd2/mB2yRDlew7Jb5hEXiCD9ibfO7wpk8i4sevK6DFny9h7EYbM3/sHg==} + hasBin: true + + glob@7.2.3: + resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + deprecated: Glob versions prior to v9 are no 
longer supported + + globals@11.12.0: + resolution: {integrity: sha512-WOBp/EEGUiIsJSp7wcv/y6MO+lV9UoncWqxuFfm8eBwzWNgyfBd6Gz+IeKQ9jCmyhoH99g15M3T+QaVHFjizVA==} + engines: {node: '>=4'} + + globals@13.24.0: + resolution: {integrity: sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==} + engines: {node: '>=8'} + + globby@11.1.0: + resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==} + engines: {node: '>=10'} + + goober@2.1.14: + resolution: {integrity: sha512-4UpC0NdGyAFqLNPnhCT2iHpza2q+RAY3GV85a/mRPdzyPQMsj0KmMMuetdIkzWRbJ+Hgau1EZztq8ImmiMGhsg==} + peerDependencies: + csstype: ^3.0.10 + + graphemer@1.4.0: + resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} + + has-flag@3.0.0: + resolution: {integrity: sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==} + engines: {node: '>=4'} + + has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + hasown@2.0.2: + resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} + engines: {node: '>= 0.4'} + + hoist-non-react-statics@3.3.2: + resolution: {integrity: sha512-/gGivxi8JPKWNm/W0jSmzcMPpfpPLc3dY/6GxhX2hQ9iGj3aDfklV4ET7NjKpSinLpJ5vafa9iiGIEZg10SfBw==} + + ignore@5.3.1: + resolution: {integrity: sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==} + engines: {node: '>= 4'} + + import-fresh@3.3.0: + resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==} + engines: {node: '>=6'} + + imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + 
engines: {node: '>=0.8.19'} + + inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + deprecated: This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful. + + inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + is-arrayish@0.2.1: + resolution: {integrity: sha512-zz06S8t0ozoDXMG+ube26zeCTNXcKIPJZJi8hBrF4idCLms4CG9QtK7qBl1boi5ODzFpjswb5JPmHCbMpjaYzg==} + + is-binary-path@2.1.0: + resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} + engines: {node: '>=8'} + + is-core-module@2.15.0: + resolution: {integrity: sha512-Dd+Lb2/zvk9SKy1TGCt1wFJFo/MWBPMX5x7KcvLajWTGuomczdQX61PvY5yK6SVACwpoexWo81IfFyoKY2QnTA==} + engines: {node: '>= 0.4'} + + is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + + is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + + is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + + is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + + is-path-inside@3.0.3: + resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==} + engines: {node: '>=8'} + + isexe@2.0.0: + resolution: {integrity: 
sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + jackspeak@3.4.3: + resolution: {integrity: sha512-OGlZQpz2yfahA/Rd1Y8Cd9SIEsqvXkLVoSw/cgwhnhFMDbsQFeZYoJJ7bIZBS9BcamUW96asq/npPWugM+RQBw==} + + jiti@1.21.6: + resolution: {integrity: sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==} + hasBin: true + + js-tokens@4.0.0: + resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + + js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} + hasBin: true + + jsesc@2.5.2: + resolution: {integrity: sha512-OYu7XEzjkCQ3C5Ps3QIZsQfNpqoJyZZA99wd9aWd05NCtC5pWOkShK2mkL6HXQR6/Cy2lbNdPlZBpuQHXE63gA==} + engines: {node: '>=4'} + hasBin: true + + json-buffer@3.0.1: + resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + + json-parse-even-better-errors@2.3.1: + resolution: {integrity: sha512-xyFwyhro/JEof6Ghe2iz2NcXoj2sloNsWr/XsERDK/oiPCfaNhl5ONfp+jQdAZRQQ0IJWNzH9zIZF7li91kh2w==} + + json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + + json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + + json5@2.2.3: + resolution: {integrity: sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg==} + engines: {node: '>=6'} + hasBin: true + + keyv@4.5.4: + resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + + levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 
0.8.0'} + + lilconfig@2.1.0: + resolution: {integrity: sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==} + engines: {node: '>=10'} + + lilconfig@3.1.2: + resolution: {integrity: sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==} + engines: {node: '>=14'} + + lines-and-columns@1.2.4: + resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} + + locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + + lodash.merge@4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + + long@5.2.3: + resolution: {integrity: sha512-lcHwpNoggQTObv5apGNCTdJrO69eHOZMi4BNC+rTLER8iHAqGrUVeLh/irVIM7zTw2bOXA8T6uNPeujwOLg/2Q==} + + loose-envify@1.4.0: + resolution: {integrity: sha512-lyuxPGr/Wfhrlem2CL/UcnUc1zcqKAImBDzukY7Y5F/yQiNdko6+fRLevlw1HgMySw7f611UIY408EtxRSoK3Q==} + hasBin: true + + lru-cache@10.4.3: + resolution: {integrity: sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ==} + + lru-cache@5.1.1: + resolution: {integrity: sha512-KpNARQA3Iwv+jTA0utUVVbrh+Jlrr1Fv0e56GGzAFOXN7dk/FviaDW8LHmK52DlcH4WP2n6gI8vN1aesBFgo9w==} + + merge2@1.4.1: + resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} + engines: {node: '>= 8'} + + micromatch@4.0.7: + resolution: {integrity: sha512-LPP/3KorzCwBxfeUuZmaR6bG2kdeHSbe0P2tY3FLRU4vYrjYz5hI4QZwV0njUx3jeuKe67YukQ1LSPZBKDqO/Q==} + engines: {node: '>=8.6'} + + minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + + minimatch@9.0.5: + resolution: {integrity: 
sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==} + engines: {node: '>=16 || 14 >=14.17'} + + minipass@7.1.2: + resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} + engines: {node: '>=16 || 14 >=14.17'} + + ms@2.1.2: + resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} + + multipasta@0.2.3: + resolution: {integrity: sha512-ck6FVWpay4fzG/qHn804fSGnVHUvTLljzKAUd9LE1Gw61aIxyU3PrAlOHQBFlP4sGvbWOmf7L1y32Dvlvq3KQg==} + + mz@2.7.0: + resolution: {integrity: sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==} + + nanoid@3.3.7: + resolution: {integrity: sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + + node-releases@2.0.18: + resolution: {integrity: sha512-d9VeXT4SJ7ZeOqGX6R5EM022wpL+eWPooLI+5UpWn2jCT1aosUQEhQP214x33Wkwx3JQMvIm+tIoVOdodFS40g==} + + normalize-path@3.0.0: + resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + engines: {node: '>=0.10.0'} + + normalize-range@0.1.2: + resolution: {integrity: sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==} + engines: {node: '>=0.10.0'} + + object-assign@4.1.1: + resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} + engines: {node: '>=0.10.0'} + + object-hash@3.0.0: + resolution: {integrity: sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==} + engines: {node: '>= 6'} + + once@1.4.0: + resolution: 
{integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + + optionator@0.9.4: + resolution: {integrity: sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==} + engines: {node: '>= 0.8.0'} + + p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + + p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + + package-json-from-dist@1.0.0: + resolution: {integrity: sha512-dATvCeZN/8wQsGywez1mzHtTlP22H8OEfPrVMLNr4/eGa+ijtLn/6M5f0dY8UKNrC2O9UCU6SSoG3qRKnt7STw==} + + parent-module@1.0.1: + resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: '>=6'} + + parse-json@5.2.0: + resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} + engines: {node: '>=8'} + + path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + + path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + + path-scurry@1.11.1: + resolution: {integrity: sha512-Xa4Nw17FS9ApQFJ9umLiJS4orGjm7ZzwUrwamcGQuHSzDyth9boKDaycYdDcZDuqYATXw4HFXgaqWTctW/v1HA==} + engines: {node: '>=16 || 14 >=14.18'} + + path-type@4.0.0: + 
resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} + engines: {node: '>=8'} + + picocolors@1.0.1: + resolution: {integrity: sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==} + + picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + + pify@2.3.0: + resolution: {integrity: sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==} + engines: {node: '>=0.10.0'} + + pirates@4.0.6: + resolution: {integrity: sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==} + engines: {node: '>= 6'} + + postcss-import@15.1.0: + resolution: {integrity: sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==} + engines: {node: '>=14.0.0'} + peerDependencies: + postcss: ^8.0.0 + + postcss-js@4.0.1: + resolution: {integrity: sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==} + engines: {node: ^12 || ^14 || >= 16} + peerDependencies: + postcss: ^8.4.21 + + postcss-load-config@4.0.2: + resolution: {integrity: sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==} + engines: {node: '>= 14'} + peerDependencies: + postcss: '>=8.0.9' + ts-node: '>=9.0.0' + peerDependenciesMeta: + postcss: + optional: true + ts-node: + optional: true + + postcss-loader@8.1.1: + resolution: {integrity: sha512-0IeqyAsG6tYiDRCYKQJLAmgQr47DX6N7sFSWvQxt6AcupX8DIdmykuk/o/tx0Lze3ErGHJEp5OSRxrelC6+NdQ==} + engines: {node: '>= 18.12.0'} + peerDependencies: + '@rspack/core': 0.x || 1.x + postcss: ^7.0.0 || ^8.0.1 + webpack: ^5.0.0 + peerDependenciesMeta: + '@rspack/core': + optional: true + webpack: + optional: true + + postcss-nested@6.2.0: + resolution: {integrity: 
sha512-HQbt28KulC5AJzG+cZtj9kvKB93CFCdLvog1WFLf1D+xmMvPGlBstkpTEZfK5+AN9hfJocyBFCNiqyS48bpgzQ==} + engines: {node: '>=12.0'} + peerDependencies: + postcss: ^8.2.14 + + postcss-selector-parser@6.1.1: + resolution: {integrity: sha512-b4dlw/9V8A71rLIDsSwVmak9z2DuBUB7CA1/wSdelNEzqsjoSPeADTWNO09lpH49Diy3/JIZ2bSPB1dI3LJCHg==} + engines: {node: '>=4'} + + postcss-value-parser@4.2.0: + resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==} + + postcss@8.4.40: + resolution: {integrity: sha512-YF2kKIUzAofPMpfH6hOi2cGnv/HrUlfucspc7pDyvv7kGdqXrfj8SCl/t8owkEgKEuu8ZcRjSOxFxVLqwChZ2Q==} + engines: {node: ^10 || ^12 || >=14} + + prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + + prettier@3.3.3: + resolution: {integrity: sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==} + engines: {node: '>=14'} + hasBin: true + + protobufjs@7.3.2: + resolution: {integrity: sha512-RXyHaACeqXeqAKGLDl68rQKbmObRsTIn4TYVUUug1KfS47YWCo5MacGITEryugIgZqORCvJWEk4l449POg5Txg==} + engines: {node: '>=12.0.0'} + + punycode@2.3.1: + resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} + engines: {node: '>=6'} + + pure-rand@6.1.0: + resolution: {integrity: sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==} + + queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + + react-dom@18.3.1: + resolution: {integrity: sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==} + peerDependencies: + react: ^18.3.1 + + react-is@16.13.1: + resolution: {integrity: 
sha512-24e6ynE2H+OKt4kqsOvNd8kBpV65zoxbA4BVsEOB3ARVWQki/DHzaUoC5KuON/BiccDaCCTZBuOcfZs70kR8bQ==} + + react-refresh@0.14.2: + resolution: {integrity: sha512-jCvmsr+1IUSMUyzOkRcvnVbX3ZYC6g9TDrDbFuFmRDq7PD4yaGbLKNQL6k2jnArV8hjYxh7hVhAZB6s9HDGpZA==} + engines: {node: '>=0.10.0'} + + react@18.3.1: + resolution: {integrity: sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==} + engines: {node: '>=0.10.0'} + + read-cache@1.0.0: + resolution: {integrity: sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==} + + readdirp@3.6.0: + resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} + engines: {node: '>=8.10.0'} + + resolve-from@4.0.0: + resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: '>=4'} + + resolve@1.22.8: + resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==} + hasBin: true + + reusify@1.0.4: + resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + + rimraf@3.0.2: + resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==} + deprecated: Rimraf versions prior to v4 are no longer supported + hasBin: true + + rollup@4.19.0: + resolution: {integrity: sha512-5r7EYSQIowHsK4eTZ0Y81qpZuJz+MUuYeqmmYmRMl1nwhdmbiYqt5jwzf6u7wyOzJgYqtCRMtVRKOtHANBz7rA==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + + run-parallel@1.2.0: + resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + + scheduler@0.23.2: + resolution: {integrity: 
sha512-UOShsPwz7NrMUqhR6t0hWjFduvOzbtv7toDH1/hIrfRNIDBnnBWd0CwJTGvTpngVlmwGCdP9/Zl/tVrDqcuYzQ==} + + semver@6.3.1: + resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==} + hasBin: true + + semver@7.6.3: + resolution: {integrity: sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==} + engines: {node: '>=10'} + hasBin: true + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + signal-exit@4.1.0: + resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==} + engines: {node: '>=14'} + + slash@3.0.0: + resolution: {integrity: sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} + engines: {node: '>=8'} + + source-map-js@1.2.0: + resolution: {integrity: sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==} + engines: {node: '>=0.10.0'} + + string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + + string-width@5.1.2: + resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==} + engines: {node: '>=12'} + + strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + strip-ansi@7.1.0: + resolution: {integrity: sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==} + engines: {node: '>=12'} + + 
strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + + sucrase@3.35.0: + resolution: {integrity: sha512-8EbVDiu9iN/nESwxeSxDKe0dunta1GOlHufmSSXxMD2z2/tMZpDMpvXQGsc+ajGo8y2uYUmixaSRUc/QPoQ0GA==} + engines: {node: '>=16 || 14 >=14.17'} + hasBin: true + + supports-color@5.5.0: + resolution: {integrity: sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==} + engines: {node: '>=4'} + + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + + supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: '>= 0.4'} + + tailwindcss@3.4.7: + resolution: {integrity: sha512-rxWZbe87YJb4OcSopb7up2Ba4U82BoiSGUdoDr3Ydrg9ckxFS/YWsvhN323GMcddgU65QRy7JndC7ahhInhvlQ==} + engines: {node: '>=14.0.0'} + hasBin: true + + text-table@0.2.0: + resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} + + thenify-all@1.6.0: + resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==} + engines: {node: '>=0.8'} + + thenify@3.3.1: + resolution: {integrity: sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==} + + tiny-invariant@1.3.3: + resolution: {integrity: sha512-+FbBPE1o9QAYvviau/qC5SE3caw21q3xkvWKBtja5vgqOWIHHJ3ioaq1VPfn/Szqctz2bU/oYeKd9/z5BL+PVg==} + + tiny-warning@1.0.3: + resolution: {integrity: sha512-lBN9zLN/oAf68o3zNXYrdCt1kP8WsiGW8Oo2ka41b2IM5JL/S1CTyX1rW0mb/zSuJun0ZUrDxx4sqvYS2FWzPA==} + + to-fast-properties@2.0.0: + resolution: {integrity: 
sha512-/OaKK0xYrs3DmxRYqL/yDc+FxFUVYhDlXMhRmv3z915w2HF1tnN1omB354j8VUGO/hbRzyD6Y3sA7v7GS/ceog==} + engines: {node: '>=4'} + + to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + + ts-api-utils@1.3.0: + resolution: {integrity: sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ==} + engines: {node: '>=16'} + peerDependencies: + typescript: '>=4.2.0' + + ts-interface-checker@0.1.13: + resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==} + + type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + + type-fest@0.20.2: + resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==} + engines: {node: '>=10'} + + typescript@5.5.4: + resolution: {integrity: sha512-Mtq29sKDAEYP7aljRgtPOpTvOfbwRWlS6dPRzwjdE+C0R4brX/GUyhHSecbHMFLNBLcJIPt9nl9yG5TZ1weH+Q==} + engines: {node: '>=14.17'} + hasBin: true + + undici-types@6.13.0: + resolution: {integrity: sha512-xtFJHudx8S2DSoujjMd1WeWvn7KKWFRESZTMeL1RptAYERu29D6jphMjjY+vn96jvN3kVPDNxU/E13VTaXj6jg==} + + unplugin@1.12.0: + resolution: {integrity: sha512-KeczzHl2sATPQUx1gzo+EnUkmN4VmGBYRRVOZSGvGITE9rGHRDGqft6ONceP3vgXcyJ2XjX5axG5jMWUwNCYLw==} + engines: {node: '>=14.0.0'} + + update-browserslist-db@1.1.0: + resolution: {integrity: sha512-EdRAaAyk2cUE1wOf2DkEhzxqOQvFOoRJFNS6NeyJ01Gp2beMRpBAINjM2iDXE3KCuKhwnvHIQCJm6ThL2Z+HzQ==} + hasBin: true + peerDependencies: + browserslist: '>= 4.21.0' + + uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + + use-sync-external-store@1.2.2: + resolution: {integrity: 
sha512-PElTlVMwpblvbNqQ82d2n6RjStvdSoNe9FG28kNfz3WiXilJm4DdNkEzRhCZuIDwY8U08WVihhGR5iRqAwfDiw==} + peerDependencies: + react: ^16.8.0 || ^17.0.0 || ^18.0.0 + + util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + vite@5.3.5: + resolution: {integrity: sha512-MdjglKR6AQXQb9JGiS7Rc2wC6uMjcm7Go/NHNO63EwiJXfuk9PgqiP/n5IDJCziMkfw9n4Ubp7lttNwz+8ZVKA==} + engines: {node: ^18.0.0 || >=20.0.0} + hasBin: true + peerDependencies: + '@types/node': ^18.0.0 || >=20.0.0 + less: '*' + lightningcss: ^1.21.0 + sass: '*' + stylus: '*' + sugarss: '*' + terser: ^5.4.0 + peerDependenciesMeta: + '@types/node': + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + + webpack-sources@3.2.3: + resolution: {integrity: sha512-/DyMEOrDgLKKIG0fmvtz+4dUX/3Ghozwgm6iPp8KRhvn+eQf9+Q7GWxVNMk3+uCPWfdXYC4ExGBckIXdFEfH1w==} + engines: {node: '>=10.13.0'} + + webpack-virtual-modules@0.6.2: + resolution: {integrity: sha512-66/V2i5hQanC51vBQKPH4aI8NMAcBW59FVBs+rC7eGHupMyfn34q7rZIE+ETlJ+XTevqfUhVVBgSUNSW2flEUQ==} + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + word-wrap@1.2.5: + resolution: {integrity: sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==} + engines: {node: '>=0.10.0'} + + wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + + wrap-ansi@8.1.0: + resolution: {integrity: sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==} + engines: {node: '>=12'} + + wrappy@1.0.2: + resolution: {integrity: 
sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + yallist@3.1.1: + resolution: {integrity: sha512-a4UGQaWPH59mOXUYnAG2ewncQS4i4F43Tv3JoAM+s2VDAmS9NsK8GpDMLrCHPksFT7h3K6TOoUNn2pb7RoXx4g==} + + yaml@2.5.0: + resolution: {integrity: sha512-2wWLbGbYDiSqqIKoPjar3MPgB94ErzCtrNE1FdqGuaO0pi2JGjmE8aW8TDZwzU7vuxcGRdL/4gPQwQ7hD5AMSw==} + engines: {node: '>= 14'} + hasBin: true + + yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + + zod@3.23.8: + resolution: {integrity: sha512-XBx9AXhXktjUqnepgTiE5flcKIYWi/rme0Eaj+5Y0lftuGBq+jyRu/md4WnuxqgP1ubdpNCsYEYPxrzVHD8d6g==} + +snapshots: + + '@alloc/quick-lru@5.2.0': {} + + '@ampproject/remapping@2.3.0': + dependencies: + '@jridgewell/gen-mapping': 0.3.5 + '@jridgewell/trace-mapping': 0.3.25 + + '@babel/code-frame@7.24.7': + dependencies: + '@babel/highlight': 7.24.7 + picocolors: 1.0.1 + + '@babel/compat-data@7.24.9': {} + + '@babel/core@7.24.9': + dependencies: + '@ampproject/remapping': 2.3.0 + '@babel/code-frame': 7.24.7 + '@babel/generator': 7.24.10 + '@babel/helper-compilation-targets': 7.24.8 + '@babel/helper-module-transforms': 7.24.9(@babel/core@7.24.9) + '@babel/helpers': 7.24.8 + '@babel/parser': 7.24.8 + '@babel/template': 7.24.7 + '@babel/traverse': 7.24.8 + '@babel/types': 7.24.9 + convert-source-map: 2.0.0 + debug: 4.3.5 + gensync: 1.0.0-beta.2 + json5: 2.2.3 + semver: 6.3.1 + transitivePeerDependencies: + - supports-color + + '@babel/generator@7.24.10': + dependencies: + '@babel/types': 7.24.9 + '@jridgewell/gen-mapping': 0.3.5 + '@jridgewell/trace-mapping': 0.3.25 + jsesc: 2.5.2 + + '@babel/helper-compilation-targets@7.24.8': + dependencies: + '@babel/compat-data': 7.24.9 + '@babel/helper-validator-option': 7.24.8 + browserslist: 4.23.2 + lru-cache: 5.1.1 + semver: 6.3.1 + + '@babel/helper-environment-visitor@7.24.7': + dependencies: + '@babel/types': 
7.24.9 + + '@babel/helper-function-name@7.24.7': + dependencies: + '@babel/template': 7.24.7 + '@babel/types': 7.24.9 + + '@babel/helper-hoist-variables@7.24.7': + dependencies: + '@babel/types': 7.24.9 + + '@babel/helper-module-imports@7.24.7': + dependencies: + '@babel/traverse': 7.24.8 + '@babel/types': 7.24.9 + transitivePeerDependencies: + - supports-color + + '@babel/helper-module-transforms@7.24.9(@babel/core@7.24.9)': + dependencies: + '@babel/core': 7.24.9 + '@babel/helper-environment-visitor': 7.24.7 + '@babel/helper-module-imports': 7.24.7 + '@babel/helper-simple-access': 7.24.7 + '@babel/helper-split-export-declaration': 7.24.7 + '@babel/helper-validator-identifier': 7.24.7 + transitivePeerDependencies: + - supports-color + + '@babel/helper-plugin-utils@7.24.8': {} + + '@babel/helper-simple-access@7.24.7': + dependencies: + '@babel/traverse': 7.24.8 + '@babel/types': 7.24.9 + transitivePeerDependencies: + - supports-color + + '@babel/helper-split-export-declaration@7.24.7': + dependencies: + '@babel/types': 7.24.9 + + '@babel/helper-string-parser@7.24.8': {} + + '@babel/helper-validator-identifier@7.24.7': {} + + '@babel/helper-validator-option@7.24.8': {} + + '@babel/helpers@7.24.8': + dependencies: + '@babel/template': 7.24.7 + '@babel/types': 7.24.9 + + '@babel/highlight@7.24.7': + dependencies: + '@babel/helper-validator-identifier': 7.24.7 + chalk: 2.4.2 + js-tokens: 4.0.0 + picocolors: 1.0.1 + + '@babel/parser@7.24.8': + dependencies: + '@babel/types': 7.24.9 + + '@babel/plugin-syntax-jsx@7.24.7(@babel/core@7.24.9)': + dependencies: + '@babel/core': 7.24.9 + '@babel/helper-plugin-utils': 7.24.8 + + '@babel/plugin-syntax-typescript@7.24.7(@babel/core@7.24.9)': + dependencies: + '@babel/core': 7.24.9 + '@babel/helper-plugin-utils': 7.24.8 + + '@babel/plugin-transform-react-jsx-self@7.24.7(@babel/core@7.24.9)': + dependencies: + '@babel/core': 7.24.9 + '@babel/helper-plugin-utils': 7.24.8 + + 
'@babel/plugin-transform-react-jsx-source@7.24.7(@babel/core@7.24.9)': + dependencies: + '@babel/core': 7.24.9 + '@babel/helper-plugin-utils': 7.24.8 + + '@babel/template@7.24.7': + dependencies: + '@babel/code-frame': 7.24.7 + '@babel/parser': 7.24.8 + '@babel/types': 7.24.9 + + '@babel/traverse@7.24.8': + dependencies: + '@babel/code-frame': 7.24.7 + '@babel/generator': 7.24.10 + '@babel/helper-environment-visitor': 7.24.7 + '@babel/helper-function-name': 7.24.7 + '@babel/helper-hoist-variables': 7.24.7 + '@babel/helper-split-export-declaration': 7.24.7 + '@babel/parser': 7.24.8 + '@babel/types': 7.24.9 + debug: 4.3.5 + globals: 11.12.0 + transitivePeerDependencies: + - supports-color + + '@babel/types@7.24.9': + dependencies: + '@babel/helper-string-parser': 7.24.8 + '@babel/helper-validator-identifier': 7.24.7 + to-fast-properties: 2.0.0 + + '@effect/opentelemetry@0.35.0(@opentelemetry/api@1.9.0)(@opentelemetry/resources@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-metrics@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-node@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/sdk-trace-web@1.25.1(@opentelemetry/api@1.9.0))(@opentelemetry/semantic-conventions@1.25.1)(effect@3.6.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 + effect: 3.6.0 + optionalDependencies: + '@opentelemetry/sdk-metrics': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-node': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-web': 1.25.1(@opentelemetry/api@1.9.0) + + '@effect/platform@0.61.2(@effect/schema@0.70.1(effect@3.6.0))(effect@3.6.0)': + dependencies: + '@effect/schema': 0.70.1(effect@3.6.0) + effect: 3.6.0 + find-my-way-ts: 0.1.5 + multipasta: 0.2.3 + + '@effect/schema@0.70.1(effect@3.6.0)': + 
dependencies: + effect: 3.6.0 + fast-check: 3.20.0 + + '@esbuild/aix-ppc64@0.21.5': + optional: true + + '@esbuild/android-arm64@0.21.5': + optional: true + + '@esbuild/android-arm@0.21.5': + optional: true + + '@esbuild/android-x64@0.21.5': + optional: true + + '@esbuild/darwin-arm64@0.21.5': + optional: true + + '@esbuild/darwin-x64@0.21.5': + optional: true + + '@esbuild/freebsd-arm64@0.21.5': + optional: true + + '@esbuild/freebsd-x64@0.21.5': + optional: true + + '@esbuild/linux-arm64@0.21.5': + optional: true + + '@esbuild/linux-arm@0.21.5': + optional: true + + '@esbuild/linux-ia32@0.21.5': + optional: true + + '@esbuild/linux-loong64@0.21.5': + optional: true + + '@esbuild/linux-mips64el@0.21.5': + optional: true + + '@esbuild/linux-ppc64@0.21.5': + optional: true + + '@esbuild/linux-riscv64@0.21.5': + optional: true + + '@esbuild/linux-s390x@0.21.5': + optional: true + + '@esbuild/linux-x64@0.21.5': + optional: true + + '@esbuild/netbsd-x64@0.21.5': + optional: true + + '@esbuild/openbsd-x64@0.21.5': + optional: true + + '@esbuild/sunos-x64@0.21.5': + optional: true + + '@esbuild/win32-arm64@0.21.5': + optional: true + + '@esbuild/win32-ia32@0.21.5': + optional: true + + '@esbuild/win32-x64@0.21.5': + optional: true + + '@eslint-community/eslint-utils@4.4.0(eslint@8.57.0)': + dependencies: + eslint: 8.57.0 + eslint-visitor-keys: 3.4.3 + + '@eslint-community/regexpp@4.11.0': {} + + '@eslint/eslintrc@2.1.4': + dependencies: + ajv: 6.12.6 + debug: 4.3.5 + espree: 9.6.1 + globals: 13.24.0 + ignore: 5.3.1 + import-fresh: 3.3.0 + js-yaml: 4.1.0 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + + '@eslint/js@8.57.0': {} + + '@humanwhocodes/config-array@0.11.14': + dependencies: + '@humanwhocodes/object-schema': 2.0.3 + debug: 4.3.5 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + + '@humanwhocodes/module-importer@1.0.1': {} + + '@humanwhocodes/object-schema@2.0.3': {} + + '@isaacs/cliui@8.0.2': 
+ dependencies: + string-width: 5.1.2 + string-width-cjs: string-width@4.2.3 + strip-ansi: 7.1.0 + strip-ansi-cjs: strip-ansi@6.0.1 + wrap-ansi: 8.1.0 + wrap-ansi-cjs: wrap-ansi@7.0.0 + + '@jridgewell/gen-mapping@0.3.5': + dependencies: + '@jridgewell/set-array': 1.2.1 + '@jridgewell/sourcemap-codec': 1.5.0 + '@jridgewell/trace-mapping': 0.3.25 + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/set-array@1.2.1': {} + + '@jridgewell/sourcemap-codec@1.5.0': {} + + '@jridgewell/trace-mapping@0.3.25': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.0 + + '@nodelib/fs.scandir@2.1.5': + dependencies: + '@nodelib/fs.stat': 2.0.5 + run-parallel: 1.2.0 + + '@nodelib/fs.stat@2.0.5': {} + + '@nodelib/fs.walk@1.2.8': + dependencies: + '@nodelib/fs.scandir': 2.1.5 + fastq: 1.17.1 + + '@opentelemetry/api-logs@0.52.1': + dependencies: + '@opentelemetry/api': 1.9.0 + + '@opentelemetry/api@1.9.0': {} + + '@opentelemetry/context-async-hooks@1.25.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + optional: true + + '@opentelemetry/core@1.25.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/semantic-conventions': 1.25.1 + + '@opentelemetry/exporter-trace-otlp-http@0.52.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-exporter-base': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/otlp-exporter-base@0.52.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/otlp-transformer': 0.52.1(@opentelemetry/api@1.9.0) + + 
'@opentelemetry/otlp-transformer@0.52.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.52.1 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-logs': 0.52.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-metrics': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) + protobufjs: 7.3.2 + + '@opentelemetry/propagator-b3@1.25.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + optional: true + + '@opentelemetry/propagator-jaeger@1.25.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + optional: true + + '@opentelemetry/resources@1.25.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 + + '@opentelemetry/sdk-logs@0.52.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/api-logs': 0.52.1 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) + + '@opentelemetry/sdk-metrics@1.25.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) + lodash.merge: 4.6.2 + + '@opentelemetry/sdk-trace-base@1.25.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/resources': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 + + '@opentelemetry/sdk-trace-node@1.25.1(@opentelemetry/api@1.9.0)': + dependencies: + 
'@opentelemetry/api': 1.9.0 + '@opentelemetry/context-async-hooks': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/propagator-b3': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/propagator-jaeger': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) + semver: 7.6.3 + optional: true + + '@opentelemetry/sdk-trace-web@1.25.1(@opentelemetry/api@1.9.0)': + dependencies: + '@opentelemetry/api': 1.9.0 + '@opentelemetry/core': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/sdk-trace-base': 1.25.1(@opentelemetry/api@1.9.0) + '@opentelemetry/semantic-conventions': 1.25.1 + + '@opentelemetry/semantic-conventions@1.25.1': {} + + '@pkgjs/parseargs@0.11.0': + optional: true + + '@protobufjs/aspromise@1.1.2': {} + + '@protobufjs/base64@1.1.2': {} + + '@protobufjs/codegen@2.0.4': {} + + '@protobufjs/eventemitter@1.1.0': {} + + '@protobufjs/fetch@1.1.0': + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/inquire': 1.1.0 + + '@protobufjs/float@1.0.2': {} + + '@protobufjs/inquire@1.1.0': {} + + '@protobufjs/path@1.1.2': {} + + '@protobufjs/pool@1.1.0': {} + + '@protobufjs/utf8@1.1.0': {} + + '@rollup/rollup-android-arm-eabi@4.19.0': + optional: true + + '@rollup/rollup-android-arm64@4.19.0': + optional: true + + '@rollup/rollup-darwin-arm64@4.19.0': + optional: true + + '@rollup/rollup-darwin-x64@4.19.0': + optional: true + + '@rollup/rollup-linux-arm-gnueabihf@4.19.0': + optional: true + + '@rollup/rollup-linux-arm-musleabihf@4.19.0': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.19.0': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.19.0': + optional: true + + '@rollup/rollup-linux-powerpc64le-gnu@4.19.0': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.19.0': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.19.0': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.19.0': + optional: true + + 
'@rollup/rollup-linux-x64-musl@4.19.0': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.19.0': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.19.0': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.19.0': + optional: true + + '@sentry-internal/browser-utils@8.22.0': + dependencies: + '@sentry/core': 8.22.0 + '@sentry/types': 8.22.0 + '@sentry/utils': 8.22.0 + + '@sentry-internal/feedback@8.22.0': + dependencies: + '@sentry/core': 8.22.0 + '@sentry/types': 8.22.0 + '@sentry/utils': 8.22.0 + + '@sentry-internal/replay-canvas@8.22.0': + dependencies: + '@sentry-internal/replay': 8.22.0 + '@sentry/core': 8.22.0 + '@sentry/types': 8.22.0 + '@sentry/utils': 8.22.0 + + '@sentry-internal/replay@8.22.0': + dependencies: + '@sentry-internal/browser-utils': 8.22.0 + '@sentry/core': 8.22.0 + '@sentry/types': 8.22.0 + '@sentry/utils': 8.22.0 + + '@sentry/browser@8.22.0': + dependencies: + '@sentry-internal/browser-utils': 8.22.0 + '@sentry-internal/feedback': 8.22.0 + '@sentry-internal/replay': 8.22.0 + '@sentry-internal/replay-canvas': 8.22.0 + '@sentry/core': 8.22.0 + '@sentry/types': 8.22.0 + '@sentry/utils': 8.22.0 + + '@sentry/core@8.22.0': + dependencies: + '@sentry/types': 8.22.0 + '@sentry/utils': 8.22.0 + + '@sentry/react@8.22.0(react@18.3.1)': + dependencies: + '@sentry/browser': 8.22.0 + '@sentry/core': 8.22.0 + '@sentry/types': 8.22.0 + '@sentry/utils': 8.22.0 + hoist-non-react-statics: 3.3.2 + react: 18.3.1 + + '@sentry/types@8.22.0': {} + + '@sentry/utils@8.22.0': + dependencies: + '@sentry/types': 8.22.0 + + '@tanstack/history@1.45.3': {} + + '@tanstack/query-core@5.51.21': {} + + '@tanstack/query-devtools@5.51.16': {} + + '@tanstack/react-query-devtools@5.51.21(@tanstack/react-query@5.51.21(react@18.3.1))(react@18.3.1)': + dependencies: + '@tanstack/query-devtools': 5.51.16 + '@tanstack/react-query': 5.51.21(react@18.3.1) + react: 18.3.1 + + '@tanstack/react-query@5.51.21(react@18.3.1)': + dependencies: + '@tanstack/query-core': 5.51.21 + 
react: 18.3.1 + + '@tanstack/react-router@1.45.11(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@tanstack/history': 1.45.3 + '@tanstack/react-store': 0.5.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + tiny-invariant: 1.3.3 + tiny-warning: 1.0.3 + + '@tanstack/react-store@0.5.5(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@tanstack/store': 0.5.5 + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + use-sync-external-store: 1.2.2(react@18.3.1) + + '@tanstack/router-devtools@1.45.11(@tanstack/react-router@1.45.11(react-dom@18.3.1(react@18.3.1))(react@18.3.1))(csstype@3.1.3)(react-dom@18.3.1(react@18.3.1))(react@18.3.1)': + dependencies: + '@tanstack/react-router': 1.45.11(react-dom@18.3.1(react@18.3.1))(react@18.3.1) + clsx: 2.1.1 + goober: 2.1.14(csstype@3.1.3) + react: 18.3.1 + react-dom: 18.3.1(react@18.3.1) + transitivePeerDependencies: + - csstype + + '@tanstack/router-generator@1.45.7': + dependencies: + prettier: 3.3.3 + zod: 3.23.8 + + '@tanstack/router-plugin@1.45.8(vite@5.3.5(@types/node@22.1.0))': + dependencies: + '@babel/core': 7.24.9 + '@babel/generator': 7.24.10 + '@babel/parser': 7.24.8 + '@babel/plugin-syntax-jsx': 7.24.7(@babel/core@7.24.9) + '@babel/plugin-syntax-typescript': 7.24.7(@babel/core@7.24.9) + '@babel/template': 7.24.7 + '@babel/traverse': 7.24.8 + '@babel/types': 7.24.9 + '@tanstack/router-generator': 1.45.7 + '@types/babel__core': 7.20.5 + '@types/babel__generator': 7.6.8 + '@types/babel__template': 7.4.4 + '@types/babel__traverse': 7.20.6 + babel-dead-code-elimination: 1.0.6 + chokidar: 3.6.0 + unplugin: 1.12.0 + zod: 3.23.8 + optionalDependencies: + vite: 5.3.5(@types/node@22.1.0) + transitivePeerDependencies: + - supports-color + + '@tanstack/store@0.5.5': {} + + '@types/babel__core@7.20.5': + dependencies: + '@babel/parser': 7.24.8 + '@babel/types': 7.24.9 + '@types/babel__generator': 7.6.8 + '@types/babel__template': 7.4.4 + 
'@types/babel__traverse': 7.20.6 + + '@types/babel__generator@7.6.8': + dependencies: + '@babel/types': 7.24.9 + + '@types/babel__template@7.4.4': + dependencies: + '@babel/parser': 7.24.8 + '@babel/types': 7.24.9 + + '@types/babel__traverse@7.20.6': + dependencies: + '@babel/types': 7.24.9 + + '@types/estree@1.0.5': {} + + '@types/node@22.1.0': + dependencies: + undici-types: 6.13.0 + + '@types/prop-types@15.7.12': {} + + '@types/react-dom@18.3.0': + dependencies: + '@types/react': 18.3.3 + + '@types/react@18.3.3': + dependencies: + '@types/prop-types': 15.7.12 + csstype: 3.1.3 + + '@typescript-eslint/eslint-plugin@7.17.0(@typescript-eslint/parser@7.17.0(eslint@8.57.0)(typescript@5.5.4))(eslint@8.57.0)(typescript@5.5.4)': + dependencies: + '@eslint-community/regexpp': 4.11.0 + '@typescript-eslint/parser': 7.17.0(eslint@8.57.0)(typescript@5.5.4) + '@typescript-eslint/scope-manager': 7.17.0 + '@typescript-eslint/type-utils': 7.17.0(eslint@8.57.0)(typescript@5.5.4) + '@typescript-eslint/utils': 7.17.0(eslint@8.57.0)(typescript@5.5.4) + '@typescript-eslint/visitor-keys': 7.17.0 + eslint: 8.57.0 + graphemer: 1.4.0 + ignore: 5.3.1 + natural-compare: 1.4.0 + ts-api-utils: 1.3.0(typescript@5.5.4) + optionalDependencies: + typescript: 5.5.4 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/parser@7.17.0(eslint@8.57.0)(typescript@5.5.4)': + dependencies: + '@typescript-eslint/scope-manager': 7.17.0 + '@typescript-eslint/types': 7.17.0 + '@typescript-eslint/typescript-estree': 7.17.0(typescript@5.5.4) + '@typescript-eslint/visitor-keys': 7.17.0 + debug: 4.3.5 + eslint: 8.57.0 + optionalDependencies: + typescript: 5.5.4 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/scope-manager@7.17.0': + dependencies: + '@typescript-eslint/types': 7.17.0 + '@typescript-eslint/visitor-keys': 7.17.0 + + '@typescript-eslint/type-utils@7.17.0(eslint@8.57.0)(typescript@5.5.4)': + dependencies: + '@typescript-eslint/typescript-estree': 
7.17.0(typescript@5.5.4) + '@typescript-eslint/utils': 7.17.0(eslint@8.57.0)(typescript@5.5.4) + debug: 4.3.5 + eslint: 8.57.0 + ts-api-utils: 1.3.0(typescript@5.5.4) + optionalDependencies: + typescript: 5.5.4 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/types@7.17.0': {} + + '@typescript-eslint/typescript-estree@7.17.0(typescript@5.5.4)': + dependencies: + '@typescript-eslint/types': 7.17.0 + '@typescript-eslint/visitor-keys': 7.17.0 + debug: 4.3.5 + globby: 11.1.0 + is-glob: 4.0.3 + minimatch: 9.0.5 + semver: 7.6.3 + ts-api-utils: 1.3.0(typescript@5.5.4) + optionalDependencies: + typescript: 5.5.4 + transitivePeerDependencies: + - supports-color + + '@typescript-eslint/utils@7.17.0(eslint@8.57.0)(typescript@5.5.4)': + dependencies: + '@eslint-community/eslint-utils': 4.4.0(eslint@8.57.0) + '@typescript-eslint/scope-manager': 7.17.0 + '@typescript-eslint/types': 7.17.0 + '@typescript-eslint/typescript-estree': 7.17.0(typescript@5.5.4) + eslint: 8.57.0 + transitivePeerDependencies: + - supports-color + - typescript + + '@typescript-eslint/visitor-keys@7.17.0': + dependencies: + '@typescript-eslint/types': 7.17.0 + eslint-visitor-keys: 3.4.3 + + '@ungap/structured-clone@1.2.0': {} + + '@vitejs/plugin-react@4.3.1(vite@5.3.5(@types/node@22.1.0))': + dependencies: + '@babel/core': 7.24.9 + '@babel/plugin-transform-react-jsx-self': 7.24.7(@babel/core@7.24.9) + '@babel/plugin-transform-react-jsx-source': 7.24.7(@babel/core@7.24.9) + '@types/babel__core': 7.20.5 + react-refresh: 0.14.2 + vite: 5.3.5(@types/node@22.1.0) + transitivePeerDependencies: + - supports-color + + acorn-jsx@5.3.2(acorn@8.12.1): + dependencies: + acorn: 8.12.1 + + acorn@8.12.1: {} + + ajv@6.12.6: + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + + ansi-regex@5.0.1: {} + + ansi-regex@6.0.1: {} + + ansi-styles@3.2.1: + dependencies: + color-convert: 1.9.3 + + ansi-styles@4.3.0: + dependencies: + 
color-convert: 2.0.1 + + ansi-styles@6.2.1: {} + + any-promise@1.3.0: {} + + anymatch@3.1.3: + dependencies: + normalize-path: 3.0.0 + picomatch: 2.3.1 + + arg@5.0.2: {} + + argparse@2.0.1: {} + + array-union@2.1.0: {} + + autoprefixer@10.4.19(postcss@8.4.40): + dependencies: + browserslist: 4.23.2 + caniuse-lite: 1.0.30001643 + fraction.js: 4.3.7 + normalize-range: 0.1.2 + picocolors: 1.0.1 + postcss: 8.4.40 + postcss-value-parser: 4.2.0 + + babel-dead-code-elimination@1.0.6: + dependencies: + '@babel/core': 7.24.9 + '@babel/parser': 7.24.8 + '@babel/traverse': 7.24.8 + '@babel/types': 7.24.9 + transitivePeerDependencies: + - supports-color + + balanced-match@1.0.2: {} + + binary-extensions@2.3.0: {} + + brace-expansion@1.1.11: + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + brace-expansion@2.0.1: + dependencies: + balanced-match: 1.0.2 + + braces@3.0.3: + dependencies: + fill-range: 7.1.1 + + browserslist@4.23.2: + dependencies: + caniuse-lite: 1.0.30001643 + electron-to-chromium: 1.5.2 + node-releases: 2.0.18 + update-browserslist-db: 1.1.0(browserslist@4.23.2) + + callsites@3.1.0: {} + + camelcase-css@2.0.1: {} + + caniuse-lite@1.0.30001643: {} + + chalk@2.4.2: + dependencies: + ansi-styles: 3.2.1 + escape-string-regexp: 1.0.5 + supports-color: 5.5.0 + + chalk@4.1.2: + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + chokidar@3.6.0: + dependencies: + anymatch: 3.1.3 + braces: 3.0.3 + glob-parent: 5.1.2 + is-binary-path: 2.1.0 + is-glob: 4.0.3 + normalize-path: 3.0.0 + readdirp: 3.6.0 + optionalDependencies: + fsevents: 2.3.3 + + clsx@2.1.1: {} + + color-convert@1.9.3: + dependencies: + color-name: 1.1.3 + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-name@1.1.3: {} + + color-name@1.1.4: {} + + commander@4.1.1: {} + + concat-map@0.0.1: {} + + convert-source-map@2.0.0: {} + + cosmiconfig@9.0.0(typescript@5.5.4): + dependencies: + env-paths: 2.2.1 + import-fresh: 3.3.0 + js-yaml: 4.1.0 + parse-json: 5.2.0 + 
optionalDependencies: + typescript: 5.5.4 + + cross-spawn@7.0.3: + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + cssesc@3.0.0: {} + + csstype@3.1.3: {} + + debug@4.3.5: + dependencies: + ms: 2.1.2 + + deep-is@0.1.4: {} + + didyoumean@1.2.2: {} + + dir-glob@3.0.1: + dependencies: + path-type: 4.0.0 + + dlv@1.1.3: {} + + doctrine@3.0.0: + dependencies: + esutils: 2.0.3 + + eastasianwidth@0.2.0: {} + + effect@3.6.0: {} + + electron-to-chromium@1.5.2: {} + + emoji-regex@8.0.0: {} + + emoji-regex@9.2.2: {} + + env-paths@2.2.1: {} + + error-ex@1.3.2: + dependencies: + is-arrayish: 0.2.1 + + esbuild@0.21.5: + optionalDependencies: + '@esbuild/aix-ppc64': 0.21.5 + '@esbuild/android-arm': 0.21.5 + '@esbuild/android-arm64': 0.21.5 + '@esbuild/android-x64': 0.21.5 + '@esbuild/darwin-arm64': 0.21.5 + '@esbuild/darwin-x64': 0.21.5 + '@esbuild/freebsd-arm64': 0.21.5 + '@esbuild/freebsd-x64': 0.21.5 + '@esbuild/linux-arm': 0.21.5 + '@esbuild/linux-arm64': 0.21.5 + '@esbuild/linux-ia32': 0.21.5 + '@esbuild/linux-loong64': 0.21.5 + '@esbuild/linux-mips64el': 0.21.5 + '@esbuild/linux-ppc64': 0.21.5 + '@esbuild/linux-riscv64': 0.21.5 + '@esbuild/linux-s390x': 0.21.5 + '@esbuild/linux-x64': 0.21.5 + '@esbuild/netbsd-x64': 0.21.5 + '@esbuild/openbsd-x64': 0.21.5 + '@esbuild/sunos-x64': 0.21.5 + '@esbuild/win32-arm64': 0.21.5 + '@esbuild/win32-ia32': 0.21.5 + '@esbuild/win32-x64': 0.21.5 + + escalade@3.1.2: {} + + escape-string-regexp@1.0.5: {} + + escape-string-regexp@4.0.0: {} + + eslint-plugin-react-hooks@4.6.2(eslint@8.57.0): + dependencies: + eslint: 8.57.0 + + eslint-plugin-react-refresh@0.4.9(eslint@8.57.0): + dependencies: + eslint: 8.57.0 + + eslint-scope@7.2.2: + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + + eslint-visitor-keys@3.4.3: {} + + eslint@8.57.0: + dependencies: + '@eslint-community/eslint-utils': 4.4.0(eslint@8.57.0) + '@eslint-community/regexpp': 4.11.0 + '@eslint/eslintrc': 2.1.4 + '@eslint/js': 8.57.0 + 
'@humanwhocodes/config-array': 0.11.14 + '@humanwhocodes/module-importer': 1.0.1 + '@nodelib/fs.walk': 1.2.8 + '@ungap/structured-clone': 1.2.0 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.3 + debug: 4.3.5 + doctrine: 3.0.0 + escape-string-regexp: 4.0.0 + eslint-scope: 7.2.2 + eslint-visitor-keys: 3.4.3 + espree: 9.6.1 + esquery: 1.6.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 6.0.1 + find-up: 5.0.0 + glob-parent: 6.0.2 + globals: 13.24.0 + graphemer: 1.4.0 + ignore: 5.3.1 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + is-path-inside: 3.0.3 + js-yaml: 4.1.0 + json-stable-stringify-without-jsonify: 1.0.1 + levn: 0.4.1 + lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.4 + strip-ansi: 6.0.1 + text-table: 0.2.0 + transitivePeerDependencies: + - supports-color + + espree@9.6.1: + dependencies: + acorn: 8.12.1 + acorn-jsx: 5.3.2(acorn@8.12.1) + eslint-visitor-keys: 3.4.3 + + esquery@1.6.0: + dependencies: + estraverse: 5.3.0 + + esrecurse@4.3.0: + dependencies: + estraverse: 5.3.0 + + estraverse@5.3.0: {} + + esutils@2.0.3: {} + + fast-check@3.20.0: + dependencies: + pure-rand: 6.1.0 + + fast-deep-equal@3.1.3: {} + + fast-glob@3.3.2: + dependencies: + '@nodelib/fs.stat': 2.0.5 + '@nodelib/fs.walk': 1.2.8 + glob-parent: 5.1.2 + merge2: 1.4.1 + micromatch: 4.0.7 + + fast-json-stable-stringify@2.1.0: {} + + fast-levenshtein@2.0.6: {} + + fastq@1.17.1: + dependencies: + reusify: 1.0.4 + + file-entry-cache@6.0.1: + dependencies: + flat-cache: 3.2.0 + + fill-range@7.1.1: + dependencies: + to-regex-range: 5.0.1 + + find-my-way-ts@0.1.5: {} + + find-up@5.0.0: + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + + flat-cache@3.2.0: + dependencies: + flatted: 3.3.1 + keyv: 4.5.4 + rimraf: 3.0.2 + + flatted@3.3.1: {} + + foreground-child@3.2.1: + dependencies: + cross-spawn: 7.0.3 + signal-exit: 4.1.0 + + fraction.js@4.3.7: {} + + fs.realpath@1.0.0: {} + + fsevents@2.3.3: + optional: true + + function-bind@1.1.2: {} + + 
gensync@1.0.0-beta.2: {} + + glob-parent@5.1.2: + dependencies: + is-glob: 4.0.3 + + glob-parent@6.0.2: + dependencies: + is-glob: 4.0.3 + + glob@10.4.5: + dependencies: + foreground-child: 3.2.1 + jackspeak: 3.4.3 + minimatch: 9.0.5 + minipass: 7.1.2 + package-json-from-dist: 1.0.0 + path-scurry: 1.11.1 + + glob@7.2.3: + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + + globals@11.12.0: {} + + globals@13.24.0: + dependencies: + type-fest: 0.20.2 + + globby@11.1.0: + dependencies: + array-union: 2.1.0 + dir-glob: 3.0.1 + fast-glob: 3.3.2 + ignore: 5.3.1 + merge2: 1.4.1 + slash: 3.0.0 + + goober@2.1.14(csstype@3.1.3): + dependencies: + csstype: 3.1.3 + + graphemer@1.4.0: {} + + has-flag@3.0.0: {} + + has-flag@4.0.0: {} + + hasown@2.0.2: + dependencies: + function-bind: 1.1.2 + + hoist-non-react-statics@3.3.2: + dependencies: + react-is: 16.13.1 + + ignore@5.3.1: {} + + import-fresh@3.3.0: + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + + imurmurhash@0.1.4: {} + + inflight@1.0.6: + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + + inherits@2.0.4: {} + + is-arrayish@0.2.1: {} + + is-binary-path@2.1.0: + dependencies: + binary-extensions: 2.3.0 + + is-core-module@2.15.0: + dependencies: + hasown: 2.0.2 + + is-extglob@2.1.1: {} + + is-fullwidth-code-point@3.0.0: {} + + is-glob@4.0.3: + dependencies: + is-extglob: 2.1.1 + + is-number@7.0.0: {} + + is-path-inside@3.0.3: {} + + isexe@2.0.0: {} + + jackspeak@3.4.3: + dependencies: + '@isaacs/cliui': 8.0.2 + optionalDependencies: + '@pkgjs/parseargs': 0.11.0 + + jiti@1.21.6: {} + + js-tokens@4.0.0: {} + + js-yaml@4.1.0: + dependencies: + argparse: 2.0.1 + + jsesc@2.5.2: {} + + json-buffer@3.0.1: {} + + json-parse-even-better-errors@2.3.1: {} + + json-schema-traverse@0.4.1: {} + + json-stable-stringify-without-jsonify@1.0.1: {} + + json5@2.2.3: {} + + keyv@4.5.4: + dependencies: + json-buffer: 3.0.1 + + levn@0.4.1: + dependencies: 
+ prelude-ls: 1.2.1 + type-check: 0.4.0 + + lilconfig@2.1.0: {} + + lilconfig@3.1.2: {} + + lines-and-columns@1.2.4: {} + + locate-path@6.0.0: + dependencies: + p-locate: 5.0.0 + + lodash.merge@4.6.2: {} + + long@5.2.3: {} + + loose-envify@1.4.0: + dependencies: + js-tokens: 4.0.0 + + lru-cache@10.4.3: {} + + lru-cache@5.1.1: + dependencies: + yallist: 3.1.1 + + merge2@1.4.1: {} + + micromatch@4.0.7: + dependencies: + braces: 3.0.3 + picomatch: 2.3.1 + + minimatch@3.1.2: + dependencies: + brace-expansion: 1.1.11 + + minimatch@9.0.5: + dependencies: + brace-expansion: 2.0.1 + + minipass@7.1.2: {} + + ms@2.1.2: {} + + multipasta@0.2.3: {} + + mz@2.7.0: + dependencies: + any-promise: 1.3.0 + object-assign: 4.1.1 + thenify-all: 1.6.0 + + nanoid@3.3.7: {} + + natural-compare@1.4.0: {} + + node-releases@2.0.18: {} + + normalize-path@3.0.0: {} + + normalize-range@0.1.2: {} + + object-assign@4.1.1: {} + + object-hash@3.0.0: {} + + once@1.4.0: + dependencies: + wrappy: 1.0.2 + + optionator@0.9.4: + dependencies: + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + word-wrap: 1.2.5 + + p-limit@3.1.0: + dependencies: + yocto-queue: 0.1.0 + + p-locate@5.0.0: + dependencies: + p-limit: 3.1.0 + + package-json-from-dist@1.0.0: {} + + parent-module@1.0.1: + dependencies: + callsites: 3.1.0 + + parse-json@5.2.0: + dependencies: + '@babel/code-frame': 7.24.7 + error-ex: 1.3.2 + json-parse-even-better-errors: 2.3.1 + lines-and-columns: 1.2.4 + + path-exists@4.0.0: {} + + path-is-absolute@1.0.1: {} + + path-key@3.1.1: {} + + path-parse@1.0.7: {} + + path-scurry@1.11.1: + dependencies: + lru-cache: 10.4.3 + minipass: 7.1.2 + + path-type@4.0.0: {} + + picocolors@1.0.1: {} + + picomatch@2.3.1: {} + + pify@2.3.0: {} + + pirates@4.0.6: {} + + postcss-import@15.1.0(postcss@8.4.40): + dependencies: + postcss: 8.4.40 + postcss-value-parser: 4.2.0 + read-cache: 1.0.0 + resolve: 1.22.8 + + postcss-js@4.0.1(postcss@8.4.40): + dependencies: + 
camelcase-css: 2.0.1 + postcss: 8.4.40 + + postcss-load-config@4.0.2(postcss@8.4.40): + dependencies: + lilconfig: 3.1.2 + yaml: 2.5.0 + optionalDependencies: + postcss: 8.4.40 + + postcss-loader@8.1.1(postcss@8.4.40)(typescript@5.5.4): + dependencies: + cosmiconfig: 9.0.0(typescript@5.5.4) + jiti: 1.21.6 + postcss: 8.4.40 + semver: 7.6.3 + transitivePeerDependencies: + - typescript + + postcss-nested@6.2.0(postcss@8.4.40): + dependencies: + postcss: 8.4.40 + postcss-selector-parser: 6.1.1 + + postcss-selector-parser@6.1.1: + dependencies: + cssesc: 3.0.0 + util-deprecate: 1.0.2 + + postcss-value-parser@4.2.0: {} + + postcss@8.4.40: + dependencies: + nanoid: 3.3.7 + picocolors: 1.0.1 + source-map-js: 1.2.0 + + prelude-ls@1.2.1: {} + + prettier@3.3.3: {} + + protobufjs@7.3.2: + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/base64': 1.1.2 + '@protobufjs/codegen': 2.0.4 + '@protobufjs/eventemitter': 1.1.0 + '@protobufjs/fetch': 1.1.0 + '@protobufjs/float': 1.0.2 + '@protobufjs/inquire': 1.1.0 + '@protobufjs/path': 1.1.2 + '@protobufjs/pool': 1.1.0 + '@protobufjs/utf8': 1.1.0 + '@types/node': 22.1.0 + long: 5.2.3 + + punycode@2.3.1: {} + + pure-rand@6.1.0: {} + + queue-microtask@1.2.3: {} + + react-dom@18.3.1(react@18.3.1): + dependencies: + loose-envify: 1.4.0 + react: 18.3.1 + scheduler: 0.23.2 + + react-is@16.13.1: {} + + react-refresh@0.14.2: {} + + react@18.3.1: + dependencies: + loose-envify: 1.4.0 + + read-cache@1.0.0: + dependencies: + pify: 2.3.0 + + readdirp@3.6.0: + dependencies: + picomatch: 2.3.1 + + resolve-from@4.0.0: {} + + resolve@1.22.8: + dependencies: + is-core-module: 2.15.0 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + + reusify@1.0.4: {} + + rimraf@3.0.2: + dependencies: + glob: 7.2.3 + + rollup@4.19.0: + dependencies: + '@types/estree': 1.0.5 + optionalDependencies: + '@rollup/rollup-android-arm-eabi': 4.19.0 + '@rollup/rollup-android-arm64': 4.19.0 + '@rollup/rollup-darwin-arm64': 4.19.0 + 
'@rollup/rollup-darwin-x64': 4.19.0 + '@rollup/rollup-linux-arm-gnueabihf': 4.19.0 + '@rollup/rollup-linux-arm-musleabihf': 4.19.0 + '@rollup/rollup-linux-arm64-gnu': 4.19.0 + '@rollup/rollup-linux-arm64-musl': 4.19.0 + '@rollup/rollup-linux-powerpc64le-gnu': 4.19.0 + '@rollup/rollup-linux-riscv64-gnu': 4.19.0 + '@rollup/rollup-linux-s390x-gnu': 4.19.0 + '@rollup/rollup-linux-x64-gnu': 4.19.0 + '@rollup/rollup-linux-x64-musl': 4.19.0 + '@rollup/rollup-win32-arm64-msvc': 4.19.0 + '@rollup/rollup-win32-ia32-msvc': 4.19.0 + '@rollup/rollup-win32-x64-msvc': 4.19.0 + fsevents: 2.3.3 + + run-parallel@1.2.0: + dependencies: + queue-microtask: 1.2.3 + + scheduler@0.23.2: + dependencies: + loose-envify: 1.4.0 + + semver@6.3.1: {} + + semver@7.6.3: {} + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + signal-exit@4.1.0: {} + + slash@3.0.0: {} + + source-map-js@1.2.0: {} + + string-width@4.2.3: + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + + string-width@5.1.2: + dependencies: + eastasianwidth: 0.2.0 + emoji-regex: 9.2.2 + strip-ansi: 7.1.0 + + strip-ansi@6.0.1: + dependencies: + ansi-regex: 5.0.1 + + strip-ansi@7.1.0: + dependencies: + ansi-regex: 6.0.1 + + strip-json-comments@3.1.1: {} + + sucrase@3.35.0: + dependencies: + '@jridgewell/gen-mapping': 0.3.5 + commander: 4.1.1 + glob: 10.4.5 + lines-and-columns: 1.2.4 + mz: 2.7.0 + pirates: 4.0.6 + ts-interface-checker: 0.1.13 + + supports-color@5.5.0: + dependencies: + has-flag: 3.0.0 + + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + + supports-preserve-symlinks-flag@1.0.0: {} + + tailwindcss@3.4.7: + dependencies: + '@alloc/quick-lru': 5.2.0 + arg: 5.0.2 + chokidar: 3.6.0 + didyoumean: 1.2.2 + dlv: 1.1.3 + fast-glob: 3.3.2 + glob-parent: 6.0.2 + is-glob: 4.0.3 + jiti: 1.21.6 + lilconfig: 2.1.0 + micromatch: 4.0.7 + normalize-path: 3.0.0 + object-hash: 3.0.0 + picocolors: 1.0.1 + postcss: 8.4.40 + postcss-import: 
15.1.0(postcss@8.4.40) + postcss-js: 4.0.1(postcss@8.4.40) + postcss-load-config: 4.0.2(postcss@8.4.40) + postcss-nested: 6.2.0(postcss@8.4.40) + postcss-selector-parser: 6.1.1 + resolve: 1.22.8 + sucrase: 3.35.0 + transitivePeerDependencies: + - ts-node + + text-table@0.2.0: {} + + thenify-all@1.6.0: + dependencies: + thenify: 3.3.1 + + thenify@3.3.1: + dependencies: + any-promise: 1.3.0 + + tiny-invariant@1.3.3: {} + + tiny-warning@1.0.3: {} + + to-fast-properties@2.0.0: {} + + to-regex-range@5.0.1: + dependencies: + is-number: 7.0.0 + + ts-api-utils@1.3.0(typescript@5.5.4): + dependencies: + typescript: 5.5.4 + + ts-interface-checker@0.1.13: {} + + type-check@0.4.0: + dependencies: + prelude-ls: 1.2.1 + + type-fest@0.20.2: {} + + typescript@5.5.4: {} + + undici-types@6.13.0: {} + + unplugin@1.12.0: + dependencies: + acorn: 8.12.1 + chokidar: 3.6.0 + webpack-sources: 3.2.3 + webpack-virtual-modules: 0.6.2 + + update-browserslist-db@1.1.0(browserslist@4.23.2): + dependencies: + browserslist: 4.23.2 + escalade: 3.1.2 + picocolors: 1.0.1 + + uri-js@4.4.1: + dependencies: + punycode: 2.3.1 + + use-sync-external-store@1.2.2(react@18.3.1): + dependencies: + react: 18.3.1 + + util-deprecate@1.0.2: {} + + vite@5.3.5(@types/node@22.1.0): + dependencies: + esbuild: 0.21.5 + postcss: 8.4.40 + rollup: 4.19.0 + optionalDependencies: + '@types/node': 22.1.0 + fsevents: 2.3.3 + + webpack-sources@3.2.3: {} + + webpack-virtual-modules@0.6.2: {} + + which@2.0.2: + dependencies: + isexe: 2.0.0 + + word-wrap@1.2.5: {} + + wrap-ansi@7.0.0: + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + + wrap-ansi@8.1.0: + dependencies: + ansi-styles: 6.2.1 + string-width: 5.1.2 + strip-ansi: 7.1.0 + + wrappy@1.0.2: {} + + yallist@3.1.1: {} + + yaml@2.5.0: {} + + yocto-queue@0.1.0: {} + + zod@3.23.8: {} diff --git a/web/postcss.config.js b/web/postcss.config.js new file mode 100644 index 0000000..49c0612 --- /dev/null +++ b/web/postcss.config.js @@ -0,0 +1,6 @@ 
+export default { + plugins: { + tailwindcss: {}, + autoprefixer: {}, + }, +}; diff --git a/web/public/vite.svg b/web/public/vite.svg new file mode 100644 index 0000000..e7b8dfb --- /dev/null +++ b/web/public/vite.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/web/src/api/auth.ts b/web/src/api/auth.ts new file mode 100644 index 0000000..19ebc3f --- /dev/null +++ b/web/src/api/auth.ts @@ -0,0 +1,51 @@ +import {HttpClient, HttpClientRequest, HttpClientResponse,} from "@effect/platform"; +import * as Schema from "@effect/schema/Schema"; +import {Effect, Schedule} from "effect"; +import {OTLP_TRACE_PROPAGATION} from "../env.ts"; +import {APIResponse, BASE_URL, FailedAPIResponse} from "./response.ts"; + +export const UserId = Schema.UUID.pipe(Schema.brand("UserId")); +export type UserId = Schema.Schema.Type; + +export class UserInfo extends Schema.Class("UserInfo")({ + id: UserId, + username: Schema.String, +}) { +} + +export const fetchUserInfo = HttpClientRequest.get( + `${BASE_URL}/api/auth/authn/@me`, +).pipe( + HttpClient.fetch, + HttpClientResponse.schemaBodyJsonScoped(APIResponse(UserInfo)), + Effect.filterOrFail( + (res) => res.ok, + (res) => (res as FailedAPIResponse).error, + ), + Effect.retry({times: 3, schedule: Schedule.exponential(250, 2)}), + Effect.map((d) => d.data), + Effect.either, + Effect.withSpan("fetchUserInfo"), + HttpClient.withTracerPropagation(OTLP_TRACE_PROPAGATION), +); + +export class UserCreation extends Schema.Class("UserCreation")({ + username: Schema.String, +}) { +} + +const createUserBody = HttpClientRequest.schemaBody(UserCreation); +export const createUser = (data: UserCreation) => + HttpClientRequest.post(`${BASE_URL}/api/app/users/`).pipe( + createUserBody(data), + Effect.andThen(HttpClient.fetch), + HttpClientResponse.schemaBodyJsonScoped(APIResponse(UserInfo)), + Effect.filterOrFail( + (res) => res.ok, + (res) => (res as FailedAPIResponse).error, + ), + Effect.map((d) => d.data), + Effect.either, + 
Effect.withSpan("createUser", {attributes: {data: data}}), + HttpClient.withTracerPropagation(OTLP_TRACE_PROPAGATION), + ); diff --git a/web/src/api/index.ts b/web/src/api/index.ts new file mode 100644 index 0000000..8bd8c54 --- /dev/null +++ b/web/src/api/index.ts @@ -0,0 +1,28 @@ +import {layer} from "@effect/opentelemetry/WebSdk"; +import {OTLPTraceExporter} from "@opentelemetry/exporter-trace-otlp-http"; +import {BatchSpanProcessor} from "@opentelemetry/sdk-trace-base"; +import {Layer, ManagedRuntime} from "effect"; +import {OTLP_ENDPOINT, OTLP_TRACE_PROPAGATION} from "../env.ts"; + +export const WebSdkLive = OTLP_TRACE_PROPAGATION + ? layer(() => ({ + resource: {serviceName: "Web UI"}, + spanProcessor: new BatchSpanProcessor( + new OTLPTraceExporter({url: OTLP_ENDPOINT}), + ), + })) + : Layer.empty; + +if (OTLP_ENDPOINT) { + console.log("OpenTelemetry initialized with OTLP endpoint", OTLP_ENDPOINT); + + if (OTLP_TRACE_PROPAGATION) { + console.log("OpenTelemetry initialized with trace propagation"); + } else { + console.warn("OpenTelemetry initialized without trace propagation"); + } +} else { + console.warn("OpenTelemetry not initialized because no OTLP endpoint is set"); +} + +export const runtime = ManagedRuntime.make(WebSdkLive); diff --git a/web/src/api/notes.ts b/web/src/api/notes.ts new file mode 100644 index 0000000..d77214b --- /dev/null +++ b/web/src/api/notes.ts @@ -0,0 +1,40 @@ +import {HttpClient, HttpClientRequest, HttpClientResponse,} from "@effect/platform"; +import * as Schema from "@effect/schema/Schema"; +import {Effect} from "effect"; +import {OTLP_TRACE_PROPAGATION} from "../env.ts"; +import {APIResponse, BASE_URL, FailedAPIResponse} from "./response.ts"; + +export const NoteId = Schema.UUID.pipe(Schema.brand("UserId")); +export type NoteId = Schema.Schema.Type; + +export const NoteVisibility = Schema.Literal("public", "private", "direct"); +export type NoteVisibility = Schema.Schema.Type; + +export class Note extends Schema.Class("Note")({ 
+ id: NoteId, + visibility: NoteVisibility, +}) { +} + +export class NoteCreation extends Schema.Class("NoteCreation")({ + content: Schema.String, + visibility: Schema.optionalWith(NoteVisibility, {default: () => "public"}), + mentions: Schema.optional(Schema.Array(Schema.String)), +}) { +} + +const createNoteBody = HttpClientRequest.schemaBody(NoteCreation); +export const createNote = (data: NoteCreation) => + HttpClientRequest.post(`${BASE_URL}/api/app/notes/`).pipe( + createNoteBody(data), + Effect.andThen(HttpClient.fetch), + HttpClientResponse.schemaBodyJsonScoped(APIResponse(Note)), + Effect.filterOrFail( + (res) => res.ok, + (res) => (res as FailedAPIResponse).error, + ), + Effect.map((d) => d.data), + Effect.either, + Effect.withSpan("createNote", {attributes: {data: data}}), + HttpClient.withTracerPropagation(OTLP_TRACE_PROPAGATION), + ); diff --git a/web/src/api/response.ts b/web/src/api/response.ts new file mode 100644 index 0000000..6d3bf25 --- /dev/null +++ b/web/src/api/response.ts @@ -0,0 +1,42 @@ +import * as Schema from "@effect/schema/Schema"; + +export const BASE_URL = import.meta.env.VITE_BASE_URL ?? "INVALID"; + +export class APIError extends Schema.Class("APIError")({ + status_code: Schema.Number, + description: Schema.String, + metadata: Schema.optional( + Schema.Record({key: Schema.String, value: Schema.Any}), + ), +}) { + _tag = "APIError" as const; + + static toString(error: APIError) { + return `${error.status_code}: ${error.description}${error.metadata ? 
JSON.stringify(error.metadata) : ""}`; + } + + override toString() { + return APIError.toString(this); + } + + get message() { + return this.toString(); + } +} + +export const FailedAPIResponse = Schema.Struct({ + ok: Schema.Literal(false), + error: APIError, + data: Schema.Null, +}); +export type FailedAPIResponse = Schema.Schema.Type; + +export const APIResponse = (data: Schema.Schema) => + Schema.Union( + Schema.Struct({ + ok: Schema.Literal(true), + data: data, + error: Schema.Null, + }), + FailedAPIResponse, + ); diff --git a/web/src/assets/react.svg b/web/src/assets/react.svg new file mode 100644 index 0000000..6c87de9 --- /dev/null +++ b/web/src/assets/react.svg @@ -0,0 +1 @@ + \ No newline at end of file diff --git a/web/src/env.ts b/web/src/env.ts new file mode 100644 index 0000000..4c071cc --- /dev/null +++ b/web/src/env.ts @@ -0,0 +1,9 @@ +export const SENTRY_DSN = import.meta.env.VITE_SENTRY_DSN; +export const ENVIRONMENT = import.meta.env.VITE_ENVIRONMENT ?? "production"; + +// TODO: Figure out if we want to propagate the spans from the frontend +export const OTLP_TRACE_PROPAGATION = !!( + import.meta.env.VITE_OTLP_TRACE_PROPAGATION ?? "false" +); +export const OTLP_ENDPOINT = + import.meta.env.VITE_OTLP_ENDPOINT ?? 
"http://localhost:4318/v1/traces"; diff --git a/web/src/hooks/useCreateNote.ts b/web/src/hooks/useCreateNote.ts new file mode 100644 index 0000000..6b2dbb3 --- /dev/null +++ b/web/src/hooks/useCreateNote.ts @@ -0,0 +1,33 @@ +import {useMutation, useQueryClient} from "@tanstack/react-query"; +import {Effect, Either} from "effect"; +import {runtime} from "../api"; +import {createNote, Note, NoteCreation} from "../api/notes.ts"; + +type T = Effect.Effect.Success>; + +export const useCreateNote = () => { + const qc = useQueryClient(); + + return useMutation< + Either.Either.Right, + Either.Either.Left, + NoteCreation + >({ + mutationKey: ["note"], + mutationFn: async (m) => { + const v = await createNote(m).pipe(runtime.runPromise); + if (Either.isLeft(v)) throw v.left; + + return v.right; + }, + onSuccess: (data) => { + const notes = qc.getQueryData(["notes", "@me"]) as Note[] | undefined; + if (notes) { + qc.setQueryData(["notes", "@me"], { + ...notes, + data: [...notes, data], + }); + } + }, + }); +}; diff --git a/web/src/hooks/useCreateUser.ts b/web/src/hooks/useCreateUser.ts new file mode 100644 index 0000000..a230d0f --- /dev/null +++ b/web/src/hooks/useCreateUser.ts @@ -0,0 +1,27 @@ +import {useMutation, useQueryClient} from "@tanstack/react-query"; +import {Effect, Either} from "effect"; +import {createUser, UserCreation} from "../api/auth.ts"; +import {runtime} from "../api"; + +type T = Effect.Effect.Success>; + +export const useCreateUser = () => { + const qc = useQueryClient(); + + return useMutation< + Either.Either.Right, + Either.Either.Left, + UserCreation + >({ + mutationKey: ["user", "@me"], + mutationFn: async (m) => { + const v = await createUser(m).pipe(runtime.runPromise); + if (Either.isLeft(v)) throw v.left; + + return v.right; + }, + onSuccess: (data) => { + qc.setQueryData(["user", "@me"], data); + }, + }); +}; diff --git a/web/src/index.css b/web/src/index.css new file mode 100644 index 0000000..b5c61c9 --- /dev/null +++ b/web/src/index.css 
@@ -0,0 +1,3 @@ +@tailwind base; +@tailwind components; +@tailwind utilities; diff --git a/web/src/instrument.ts b/web/src/instrument.ts new file mode 100644 index 0000000..243193c --- /dev/null +++ b/web/src/instrument.ts @@ -0,0 +1,28 @@ +import * as Sentry from "@sentry/react"; +import {ENVIRONMENT, SENTRY_DSN} from "./env.ts"; +import {router} from "./routing.ts"; + +const sampleRate = + ENVIRONMENT === "development" ? 1 : ENVIRONMENT === "staging" ? 0.5 : 0.2; + +Sentry.init({ + dsn: SENTRY_DSN, + environment: ENVIRONMENT, + sampleRate, + tracesSampleRate: 1.0, + // TODO: Add more targets + tracePropagationTargets: [/^\//, /^http(s)?:\/\/localhost\//], + profilesSampleRate: 1.0, + integrations: [Sentry.tanstackRouterBrowserTracingIntegration(router)], +}); + +if (SENTRY_DSN) { + console.log( + "Sentry initialized with DSN", + SENTRY_DSN, + "and environment", + ENVIRONMENT, + ); +} else { + console.warn("Sentry not initialized because no DSN is set"); +} diff --git a/web/src/main.tsx b/web/src/main.tsx new file mode 100644 index 0000000..c08e196 --- /dev/null +++ b/web/src/main.tsx @@ -0,0 +1,23 @@ +import "./instrument"; + +import React from "react"; +import * as Sentry from "@sentry/react"; +import ReactDOM from "react-dom/client"; +import {ReactQueryDevtools} from "@tanstack/react-query-devtools"; +import "./index.css"; +import {RouterProvider} from "@tanstack/react-router"; +import {QueryClient, QueryClientProvider} from "@tanstack/react-query"; +import {router} from "./routing.ts"; + +const queryClient = new QueryClient(); + +const App = Sentry.withProfiler(() => ( + + + + + + +)); + +ReactDOM.createRoot(document.getElementById("root")!).render(); diff --git a/web/src/routeTree.gen.ts b/web/src/routeTree.gen.ts new file mode 100644 index 0000000..5f4b2d1 --- /dev/null +++ b/web/src/routeTree.gen.ts @@ -0,0 +1,100 @@ +/* prettier-ignore-start */ + +/* eslint-disable */ + +// @ts-nocheck + +// noinspection JSUnusedGlobalSymbols + +// This file is 
auto-generated by TanStack Router + +import { createFileRoute } from '@tanstack/react-router' + +// Import Routes + +import { Route as rootRoute } from './routes/__root' + +// Create Virtual Routes + +const RegisterLazyImport = createFileRoute('/register')() +const PostLazyImport = createFileRoute('/post')() +const IndexLazyImport = createFileRoute('/')() + +// Create/Update Routes + +const RegisterLazyRoute = RegisterLazyImport.update({ + path: '/register', + getParentRoute: () => rootRoute, +} as any).lazy(() => import('./routes/register.lazy').then((d) => d.Route)) + +const PostLazyRoute = PostLazyImport.update({ + path: '/post', + getParentRoute: () => rootRoute, +} as any).lazy(() => import('./routes/post.lazy').then((d) => d.Route)) + +const IndexLazyRoute = IndexLazyImport.update({ + path: '/', + getParentRoute: () => rootRoute, +} as any).lazy(() => import('./routes/index.lazy').then((d) => d.Route)) + +// Populate the FileRoutesByPath interface + +declare module '@tanstack/react-router' { + interface FileRoutesByPath { + '/': { + id: '/' + path: '/' + fullPath: '/' + preLoaderRoute: typeof IndexLazyImport + parentRoute: typeof rootRoute + } + '/post': { + id: '/post' + path: '/post' + fullPath: '/post' + preLoaderRoute: typeof PostLazyImport + parentRoute: typeof rootRoute + } + '/register': { + id: '/register' + path: '/register' + fullPath: '/register' + preLoaderRoute: typeof RegisterLazyImport + parentRoute: typeof rootRoute + } + } +} + +// Create and export the route tree + +export const routeTree = rootRoute.addChildren({ + IndexLazyRoute, + PostLazyRoute, + RegisterLazyRoute, +}) + +/* prettier-ignore-end */ + +/* ROUTE_MANIFEST_START +{ + "routes": { + "__root__": { + "filePath": "__root.tsx", + "children": [ + "/", + "/post", + "/register" + ] + }, + "/": { + "filePath": "index.lazy.tsx" + }, + "/post": { + "filePath": "post.lazy.tsx" + }, + "/register": { + "filePath": "register.lazy.tsx" + } + } +} +ROUTE_MANIFEST_END */ diff --git 
a/web/src/routes/__root.tsx b/web/src/routes/__root.tsx new file mode 100644 index 0000000..4db35bc --- /dev/null +++ b/web/src/routes/__root.tsx @@ -0,0 +1,31 @@ +import {createRootRoute, Link, Outlet} from "@tanstack/react-router"; +import {TanStackRouterDevtools} from "@tanstack/router-devtools"; +import {ReactNode} from "react"; + +function Wrapper({children}: { children: ReactNode }) { + return ( + <> +
+ + Home + +
+
+ {children} + + + ); +} + +export const Route = createRootRoute({ + component: () => ( + + + + ), + // errorComponent: (props) => ( + // + // + // + // ), +}); diff --git a/web/src/routes/index.lazy.tsx b/web/src/routes/index.lazy.tsx new file mode 100644 index 0000000..2f9680f --- /dev/null +++ b/web/src/routes/index.lazy.tsx @@ -0,0 +1,13 @@ +import {createLazyFileRoute} from "@tanstack/react-router"; + +export const Route = createLazyFileRoute("/")({ + component: Index, +}); + +function Index() { + return ( +
+

Welcome Home!

+
+ ); +} diff --git a/web/src/routes/post.lazy.tsx b/web/src/routes/post.lazy.tsx new file mode 100644 index 0000000..710e221 --- /dev/null +++ b/web/src/routes/post.lazy.tsx @@ -0,0 +1,42 @@ +import {createLazyFileRoute} from "@tanstack/react-router"; +import {useCreateNote} from "../hooks/useCreateNote.ts"; +import {NoteCreation} from "../api/notes.ts"; +import {useState} from "react"; + +export const Route = createLazyFileRoute("/post")({ + component: Post, +}); + +function Post() { + const post = useCreateNote(); + const [content, setContent] = useState(""); + + return ( +
+