diff --git a/.github/workflows/gcp-deploy-frontend.yaml b/.github/workflows/gcp-deploy-frontend.yaml index dcf577b07b..fc97a35bdd 100644 --- a/.github/workflows/gcp-deploy-frontend.yaml +++ b/.github/workflows/gcp-deploy-frontend.yaml @@ -78,6 +78,9 @@ jobs: GRAPHQL_URL: ${{env.GRAPHQL_URL}} FRONTEND_TAG: ${{env.FRONTEND_TAG}} APP_ENV: ${{env.APP_ENV}} + GOOGLE_CAL_PROJECT_NUMBER: ${{env.GOOGLE_CAL_PROJECT_NUMBER}} + GOOGLE_CAL_CLIENT_EMAIL: ${{env.GOOGLE_CAL_CLIENT_EMAIL}} + GOOGLE_CAL_CALENDAR_ID: ${{env.GOOGLE_CAL_CALENDAR_ID}} steps: - name: First Interpolation of Variables @@ -114,23 +117,18 @@ jobs: needs: [env] steps: - - name: "Checkout: ${{github.ref_name}}" - uses: actions/checkout@v2 - with: - ref: ${{github.event.after}} - - name: Set Up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: "Login to Registry: ${{needs.env.outputs.DOCKER_REGISTRY}}" - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ${{needs.env.outputs.DOCKER_REGISTRY}} username: _json_key password: ${{secrets.GCP_SA_KEY}} - name: "Build & Push Container Image: ${{needs.env.outputs.FRONTEND_TAG}}" - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: file: docker/frontend/Dockerfile tags: ${{needs.env.outputs.FRONTEND_TAG}} @@ -141,10 +139,15 @@ jobs: FRONTEND_URL=${{needs.env.outputs.FRONTEND_URL}} YOUTUBE_API_KEY=${{secrets.YOUTUBE_API_KEY}} IMGIX_TOKEN=${{secrets.IMGIX_TOKEN}} - HONEYBADGER_API_KEY=${{secrets.HONEYBADGER_API_KEY}} - USERBACK_TOKEN=${{secrets.USERBACK_TOKEN}} - WEB3_STORAGE_TOKEN=${{secrets.WEB3_STORAGE_TOKEN}} + HONEYBADGER_API_KEY=${{secrets.HONEYBADGER_API_KEY}} + GOOGLE_ANALYTICS_ID=${{secrets.GOOGLE_ANALYTICS_ID}} + USERBACK_TOKEN=${{secrets.USERBACK_TOKEN}} + WEB3_STORAGE_TOKEN=${{secrets.WEB3_STORAGE_TOKEN}} OPENSEA_API_KEY=${{secrets.OPENSEA_API_KEY}} + GCAL_PRIVATE_KEY=${{secrets.GCAL_PRIVATE_KEY}} + GCAL_CLIENT_EMAIL=${{secrets.GCAL_CLIENT_EMAIL}} + 
GCAL_PROJECT_NUMBER=${{secrets.GCAL_PROJECT_NUMBER}} + GCAL_CALENDAR_ID=${{secrets.GCAL_CALENDAR_ID}} push: true deploy-frontend: @@ -170,7 +173,11 @@ jobs: --memory 512Mi \ --ingress all \ --min-instances 1 \ - --allow-unauthenticated + --allow-unauthenticated \ + --set-env-vars GCAL_PRIVATE_KEY="${{secrets.GCAL_PRIVATE_KEY}}" \ + --set-env-vars GCAL_PROJECT_NUMBER="${{secrets.GCAL_PROJECT_NUMBER}}" \ + --set-env-vars GCAL_CLIENT_EMAIL="${{secrets.GCAL_CLIENT_EMAIL}}" \ + --set-env-vars NEXT_PUBLIC_GCAL_CALENDAR_ID="${{secrets.GCAL_CALENDAR_ID}}" finish-deployment: name: Finish Deployment diff --git a/.github/workflows/gcp-deploy-persistent-instances.yaml b/.github/workflows/gcp-deploy-persistent-instances.yaml index b245fd6b74..68c1896ec3 100644 --- a/.github/workflows/gcp-deploy-persistent-instances.yaml +++ b/.github/workflows/gcp-deploy-persistent-instances.yaml @@ -29,6 +29,9 @@ env: FRONTEND_PORT: 3000 HASURA_SECRET: metagame_secret APP_ENV: development + GOOGLE_CAL_PROJECT_NUMBER: 510169944888 + GOOGLE_CAL_CLIENT_EMAIL: metagamecalwebsite@metagamecal.iam.gserviceaccount.com + GOOGLE_CAL_CALENDAR_ID: nih59ktgafmm64ed4qk6ue8vv4 jobs: start-deployment: @@ -93,6 +96,9 @@ jobs: BACKEND_TAG: ${{env.BACKEND_TAG}} HASURA_TAG: ${{env.HASURA_TAG}} FRONTEND_TAG: ${{env.FRONTEND_TAG}} + GOOGLE_CAL_PROJECT_NUMBER: ${{env.GOOGLE_CAL_PROJECT_NUMBER}} + GOOGLE_CAL_CLIENT_EMAIL: ${{env.GOOGLE_CAL_CLIENT_EMAIL}} + GOOGLE_CAL_CALENDAR_ID: ${{env.GOOGLE_CAL_CALENDAR_ID}} steps: - name: First Interpolation of Variables @@ -296,10 +302,12 @@ jobs: FRONTEND_URL=${{needs.env.outputs.FRONTEND_URL}} YOUTUBE_API_KEY=${{secrets.YOUTUBE_API_KEY}} IMGIX_TOKEN=${{secrets.IMGIX_TOKEN}} - HONEYBADGER_API_KEY=${{secrets.HONEYBADGER_API_KEY}} - USERBACK_TOKEN=${{secrets.USERBACK_TOKEN}} - WEB3_STORAGE_TOKEN=${{secrets.WEB3_STORAGE_TOKEN}} + HONEYBADGER_API_KEY=${{secrets.HONEYBADGER_API_KEY}} + USERBACK_TOKEN=${{secrets.USERBACK_TOKEN}} + WEB3_STORAGE_TOKEN=${{secrets.WEB3_STORAGE_TOKEN}} 
OPENSEA_API_KEY=${{secrets.OPENSEA_API_KEY}} + GOOGLE_CAL_API_KEY=${{secrets.GOOGLE_CAL_API_KEY}} + GOOGLE_CAL_PRIVATE_KEY=${{secrets.GOOGLE_CAL_PRIVATE_KEY}} push: true deploy-frontend: diff --git a/.github/workflows/gcp-deploy-pr.yaml b/.github/workflows/gcp-deploy-pr.yaml index 6e0c734c6d..462af4cd4d 100644 --- a/.github/workflows/gcp-deploy-pr.yaml +++ b/.github/workflows/gcp-deploy-pr.yaml @@ -30,7 +30,6 @@ env: HASURA_PORT: 8080 FRONTEND_PORT: 3000 HASURA_SECRET: metagame_secret - GA4_ID: G-B1NKK3Q1BP APP_ENV: development jobs: @@ -249,23 +248,18 @@ jobs: needs: [env, delete-backend, undeploy-backend] steps: - - name: "Checkout: ${{github.event.pull_request.head.label}}" - uses: actions/checkout@v3 - with: - ref: ${{github.event.pull_request.head.sha}} - - name: Set Up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: "Login to Registry: ${{needs.env.outputs.DOCKER_REGISTRY}}" - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ${{needs.env.outputs.DOCKER_REGISTRY}} username: _json_key password: ${{secrets.GCP_SA_KEY}} - name: "Build & Push Container Image: ${{needs.env.outputs.BACKEND_TAG}}" - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: file: docker/backend/Dockerfile tags: ${{needs.env.outputs.BACKEND_TAG}} @@ -361,17 +355,17 @@ jobs: ref: ${{github.event.pull_request.head.sha}} - name: Set Up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: "Login to Registry: ${{needs.env.outputs.DOCKER_REGISTRY}}" - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ${{needs.env.outputs.DOCKER_REGISTRY}} username: _json_key password: ${{secrets.GCP_SA_KEY}} - name: "Build & Push Container Image: ${{needs.env.outputs.HASURA_TAG}}" - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: file: hasura/Dockerfile context: hasura @@ -459,23 +453,18 @@ jobs: needs: [env, 
delete-frontend, undeploy-frontend, deploy-hasura, seed-db] steps: - - name: "Checkout: ${{github.event.pull_request.head.label}}" - uses: actions/checkout@v3 - with: - ref: ${{github.event.pull_request.head.sha}} - - name: Set Up Docker Buildx - uses: docker/setup-buildx-action@v2 + uses: docker/setup-buildx-action@v3 - name: "Login to Registry: ${{needs.env.outputs.DOCKER_REGISTRY}}" - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: registry: ${{needs.env.outputs.DOCKER_REGISTRY}} username: _json_key password: ${{secrets.GCP_SA_KEY}} - name: "Build & Push Container Image: ${{needs.env.outputs.FRONTEND_TAG}}" - uses: docker/build-push-action@v4 + uses: docker/build-push-action@v5 with: file: docker/frontend/Dockerfile tags: ${{needs.env.outputs.FRONTEND_TAG}} @@ -486,8 +475,12 @@ jobs: FRONTEND_URL=${{needs.env.outputs.FRONTEND_URL}} YOUTUBE_API_KEY=${{secrets.YOUTUBE_API_KEY}} IMGIX_TOKEN=${{secrets.IMGIX_TOKEN}} - WEB3_STORAGE_TOKEN=${{secrets.WEB3_STORAGE_TOKEN}} + WEB3_STORAGE_TOKEN=${{secrets.WEB3_STORAGE_TOKEN}} OPENSEA_API_KEY=${{secrets.OPENSEA_API_KEY}} + GCAL_PRIVATE_KEY=${{secrets.GCAL_PRIVATE_KEY}} + GCAL_PROJECT_NUMBER=${{secrets.GCAL_PROJECT_NUMBER}} + GCAL_CLIENT_EMAIL=${{secrets.GCAL_CLIENT_EMAIL}} + GCAL_CALENDAR_ID=${{secrets.GCAL_CALENDAR_ID}} push: true deploy-frontend: @@ -519,7 +512,11 @@ jobs: --memory ${{needs.env.outputs.FRONTEND_TARGET == 'development' && '5Gi' || '512Mi'}} \ --ingress all \ --max-instances 1 \ - --allow-unauthenticated + --allow-unauthenticated \ + --set-env-vars GCAL_PRIVATE_KEY="${{secrets.GCAL_PRIVATE_KEY}}" \ + --set-env-vars GCAL_PROJECT_NUMBER="${{secrets.GCAL_PROJECT_NUMBER}}" \ + --set-env-vars GCAL_CLIENT_EMAIL="${{secrets.GCAL_CLIENT_EMAIL}}" \ + --set-env-vars NEXT_PUBLIC_GCAL_CALENDAR_ID="${{secrets.GCAL_CALENDAR_ID}}" seed-db: name: Seed Database diff --git a/.node-version b/.node-version index 47979412e9..209e3ef4b6 100644 --- a/.node-version +++ b/.node-version @@ -1 +1 @@ -16.20.1 +20 
diff --git a/docker/backend/Dockerfile b/docker/backend/Dockerfile index 55daf74f53..96d5a95ca2 100644 --- a/docker/backend/Dockerfile +++ b/docker/backend/Dockerfile @@ -1,9 +1,9 @@ -FROM node:16-slim as base +FROM node:20-slim as base WORKDIR /usr/src/app # Install dependencies not included in the slim image RUN apt-get update && \ - apt-get install -y --no-install-recommends g++ make python git openssl && \ + apt-get install -y --no-install-recommends g++ make python3 git openssl && \ apt-get install -y --no-install-recommends --reinstall ca-certificates # Install dependencies for dev and prod @@ -44,7 +44,7 @@ RUN yarn backend:build RUN yarn install --pure-lockfile --production --ignore-scripts --prefer-offline # Create completely new stage including only necessary files -FROM node:16-slim as app +FROM node:20-slim as app WORKDIR /app # Copy necessary files into the stage diff --git a/docker/discord-bot/Dockerfile b/docker/discord-bot/Dockerfile index 346f49f401..4844aec3b1 100644 --- a/docker/discord-bot/Dockerfile +++ b/docker/discord-bot/Dockerfile @@ -1,9 +1,9 @@ -FROM node:16-slim as base +FROM node:20-slim as base WORKDIR /usr/src/app # Install dependencies not included in the slim image RUN apt-get update && \ - apt-get install -y --no-install-recommends g++ make python git openssl && \ + apt-get install -y --no-install-recommends g++ make python3 git openssl && \ apt-get install -y --no-install-recommends --reinstall ca-certificates # Install dependencies for dev and prod @@ -38,7 +38,7 @@ RUN yarn discord-bot build RUN yarn install --pure-lockfile --production --ignore-scripts --prefer-offline # Create completely new stage including only necessary files -FROM node:16-slim as app +FROM node:20-slim as app WORKDIR /app # Needed at runtime diff --git a/docker/frontend/Dockerfile b/docker/frontend/Dockerfile index f17dd42955..ad5dd8dd8e 100644 --- a/docker/frontend/Dockerfile +++ b/docker/frontend/Dockerfile @@ -7,11 +7,11 @@ # site or dev environment 
respectively. ARG TARGET=production -FROM node:16-slim AS base +FROM node:20-slim AS base WORKDIR /usr/src/app # Install dependencies not included in the slim image -RUN apt-get update && apt-get install -y --no-install-recommends g++ make python git ca-certificates +RUN apt-get update && apt-get install -y --no-install-recommends g++ make python3 git ca-certificates # Install dependencies for dev and prod COPY package.json ./ @@ -26,7 +26,6 @@ COPY packages/web/graphql ./packages/web/graphql/ COPY packages/utils/*.json ./packages/utils/ COPY packages/design-system/*.json ./packages/design-system/ -RUN yarn policies set-version 1.15.2 RUN yarn install --pure-lockfile FROM base AS build @@ -47,10 +46,15 @@ ARG FRONTEND_URL https://metagame.wtf ARG IMGIX_TOKEN ARG YOUTUBE_API_KEY ARG HONEYBADGER_API_KEY +ARG GOOGLE_ANALYTICS_ID ARG USERBACK_TOKEN ARG CERAMIC_URL https://ceramic.metagame.wtf ARG WEB3_STORAGE_TOKEN ARG OPENSEA_API_KEY +ARG GCAL_CALENDAR_ID +ARG GCAL_PRIVATE_KEY +ARG GCAL_CLIENT_EMAIL +ARG GCAL_PROJECT_NUMBER # ARGs are not available at runtime, so define ENV variables # These ENVs should match the --set-env-vars in `.github/workflows/gcp-deploy.yaml` @@ -61,11 +65,16 @@ ENV NEXT_PUBLIC_FRONTEND_URL $FRONTEND_URL ENV NEXT_PUBLIC_IMGIX_TOKEN $IMGIX_TOKEN ENV NEXT_PUBLIC_YOUTUBE_API_KEY $YOUTUBE_API_KEY ENV NEXT_PUBLIC_HONEYBADGER_API_KEY $HONEYBADGER_API_KEY +ENV NEXT_PUBLIC_GOOGLE_ANALYTICS_ID $GOOGLE_ANALYTICS_ID ENV NEXT_PUBLIC_USERBACK_TOKEN $USERBACK_TOKEN ENV NEXT_PUBLIC_CERAMIC_URL $CERAMIC_URL +ENV NEXT_PUBLIC_GCAL_CALENDAR_ID $GCAL_CALENDAR_ID # These are not exposed to the browser ENV WEB3_STORAGE_TOKEN $WEB3_STORAGE_TOKEN ENV OPENSEA_API_KEY $OPENSEA_API_KEY +ENV GCAL_PRIVATE_KEY $GCAL_PRIVATE_KEY +ENV GCAL_CLIENT_EMAIL $GCAL_CLIENT_EMAIL +ENV GCAL_PROJECT_NUMBER $GCAL_PROJECT_NUMBER ONBUILD RUN yarn web:build # Delete devDependencies @@ -77,7 +86,7 @@ ONBUILD RUN yarn web:deps:build FROM "build-$TARGET" as built # New stage including only 
necessary files -FROM node:16-slim AS app +FROM node:20-slim AS app WORKDIR /app # Copy necessary files into the stage diff --git a/guides/DESCRIPTIVE_C4.MD b/guides/DESCRIPTIVE_C4.MD index 6ec1f17e46..dab0525abb 100644 --- a/guides/DESCRIPTIVE_C4.MD +++ b/guides/DESCRIPTIVE_C4.MD @@ -1,14 +1,16 @@ -A deeper understanding of the C4 -================================= -I was very surprised to learn that the core software which ultra-reliable high frequency trading platforms are built upon was constructed using a software development process with no leadership, no vision, no roadmaps, no planning, and no meetings. +# A deeper understanding of the C4 + +================================== + +I was very surprised to learn that the core software which ultra-reliable high frequency trading platforms are built upon was constructed using a software development process with no leadership, no vision, no roadmaps, no planning, and no meetings. ZeroMQ is the [core that these systems are built on](https://umbrella.cisco.com/blog/2015/11/05/the-avalanche-project-when-high-frequency-trading-meets-traffic-classification/), and ZeroMQ was built with the C4. The reason Blockrazor and Krazor use the C4 is quite simple: centralization is a blocking process which yields inaccurate results. -The C4 is a hill-climbing algorithm, and an evolution of the GitHub [Fork + Pull Model](http://help.github.com/send-pull-requests/). It is an _extremely_ powerful and fully battle-tested approach to developing software, with proven results. It's probably not possible for any non-C4 project to win in the free market against a project that (properly) uses the C4. Now we get to find out if this holds true in the fierce and brutal cryptocurrency battleground. +The C4 is a hill-climbing algorithm, and an evolution of the GitHub [Fork + Pull Model](http://help.github.com/send-pull-requests/). It is an *extremely* powerful and fully battle-tested approach to developing software, with proven results. 
It's probably not possible for any non-C4 project to win in the free market against a project that (properly) uses the C4. Now we get to find out if this holds true in the fierce and brutal cryptocurrency battleground. +## Language -Language --------- +--- > The key words "MUST", "MUST NOT", "REQUIRED", "SHALL", "SHALL NOT", "SHOULD", "SHOULD NOT", "RECOMMENDED", "MAY", and "OPTIONAL" in this document are to be interpreted as described in RFC 2119. @@ -16,10 +18,11 @@ By starting with the RFC 2119 language, the C4 text makes very clear its intenti I think C4 is the first time anyone has attempted to codify a community's rulebook as a formal and reusable protocol spec. Previously, ZMQ rules were spread out over several wiki pages and were quite specific to libzmq in many ways. But experience teaches us that the more formal, accurate, and reusable the rules, the easier it is for strangers to collaborate up-front. And less friction means a more scalable community. At the time of C4, we also had some disagreement in the libzmq project over precisely what process we were using. Not everyone felt bound by the same rules. Let's just say some people felt they had a special status, which created friction with the rest of the community. So codification made things clear. -It's easy to use C4: just host your project on GitHub, get one other person to join, and open the floor to pull requests. In your README, put a link to C4 and that's it. We've done this in quite a few projects and it does seem to work. I've been pleasantly surprised a few times just applying these rules to my own work, like CZMQ. _None_ of us are so amazing that we can work without others. +It's easy to use C4: just host your project on GitHub, get one other person to join, and open the floor to pull requests. In your README, put a link to C4 and that's it. We've done this in quite a few projects and it does seem to work. 
I've been pleasantly surprised a few times just applying these rules to my own work, like CZMQ. *None* of us are so amazing that we can work without others. -Goals ------ +## Goals + +--- > C4 is meant to provide a reusable optimal collaboration model for open source software projects. @@ -31,19 +34,19 @@ Making C4 reusable is therefore really important. To learn more about the best p > It has these specific goals: To maximize the scale of the community around a project, by reducing the friction for new Contributors and creating a scaled participation model with strong positive feedbacks; -The number one goal is size and health of the community--not technical quality, not profits, not performance, not market share. The goal is simply the number of people who contribute to the project. The science here is simple: the larger the community, the more accurate the results (The Wisdom of Crowds provides a basic introduction to the research in this field). +The number one goal is size and health of the community—not technical quality, not profits, not performance, not market share. The goal is simply the number of people who contribute to the project. The science here is simple: the larger the community, the more accurate the results (The Wisdom of Crowds provides a basic introduction to the research in this field). > To relieve dependencies on key individuals by separating different skill sets so that there is a larger pool of competence in any required domain; -Perhaps the worst problem we faced in libzmq was dependence on people who could understand the code, manage GitHub branches, and make clean releases--all at the same time. It's like looking for athletes who can run marathons and sprint, swim, and also lift weights. We humans are really good at specialization. Asking us to be really good at two contradictory things reduces the number of candidates sharply, which is a Bad Thing for any project. 
We had this problem severely in libzmq in 2009 or so, and fixed it by splitting the role of maintainer into two: one person makes patches and another makes releases. +Perhaps the worst problem we faced in libzmq was dependence on people who could understand the code, manage GitHub branches, and make clean releases—all at the same time. It's like looking for athletes who can run marathons and sprint, swim, and also lift weights. We humans are really good at specialization. Asking us to be really good at two contradictory things reduces the number of candidates sharply, which is a Bad Thing for any project. We had this problem severely in libzmq in 2009 or so, and fixed it by splitting the role of maintainer into two: one person makes patches and another makes releases. > To allow the project to develop faster and more accurately, by increasing the diversity of the decision making process; -This is theory--not fully proven, but not falsified. The diversity of the community and the number of people who can weigh in on discussions, without fear of being criticized or dismissed, the faster and more accurately the software develops. Speed is quite subjective here. Going very fast in the wrong direction is not just useless, it's actively damaging (and we suffered a lot of that in libzmq before we switched to C4). +This is theory—not fully proven, but not falsified. The diversity of the community and the number of people who can weigh in on discussions, without fear of being criticized or dismissed, the faster and more accurately the software develops. Speed is quite subjective here. Going very fast in the wrong direction is not just useless, it's actively damaging (and we suffered a lot of that in libzmq before we switched to C4). 
> To support the natural life cycle of project versions from experimental through to stable, by allowing safe experimentation, rapid failure, and isolation of stable code; -It's quite an interesting effect of the process: _the git master is almost always perfectly stable_. This has to do with the size of changes and their _latency_, i.e., the time between someone writing the code and someone actually using it fully. However, the healthy design learning process tends to cycle through drafts until becoming stable, and inviolable. +It's quite an interesting effect of the process: *the git master is almost always perfectly stable*. This has to do with the size of changes and their *latency*, i.e., the time between someone writing the code and someone actually using it fully. However, the healthy design learning process tends to cycle through drafts until becoming stable, and inviolable. > To reduce the internal complexity of project repositories, thus making it easier for Contributors to participate and reducing the scope for error; @@ -53,8 +56,9 @@ Curious observation: people who thrive in complex situations like to create comp Ultimately, we're economic creatures, and the sense that "we own this, and our work can never be used against us" makes it much easier for people to invest in an open source project like ZeroMQ. And it can't be just a feeling, it has to be real. There are a number of aspects to making collective ownership work, we'll see these one-by-one as we go through C4. -Preliminaries -------------- +## Preliminaries + +--- > The project SHALL use the git distributed revision control system. @@ -78,7 +82,7 @@ Now we move on to definitions of the parties, and the splitting of roles that sa > Contributors SHALL NOT have commit access to the repository unless they are also Maintainers. Maintainers SHALL have commit access to the repository. -What we wanted to avoid was people pushing their changes directly to master. 
This was the biggest source of trouble in libzmq historically: large masses of raw code that took months or years to fully stabilize. We eventually followed other ZeroMQ projects like PyZMQ in using pull requests. We went further, and stipulated that _all_ changes had to follow the same path. No exceptions for "special people". +What we wanted to avoid was people pushing their changes directly to master. This was the biggest source of trouble in libzmq historically: large masses of raw code that took months or years to fully stabilize. We eventually followed other ZeroMQ projects like PyZMQ in using pull requests. We went further, and stipulated that *all* changes had to follow the same path. No exceptions for "special people". > Everyone, without distinction or discrimination, SHALL have an equal right to become a Contributor under the terms of this contract. @@ -86,8 +90,9 @@ We had to state this explicitly. It used to be that the libzmq maintainers would I think this fight between individual expertise and collective intelligence plays out in other areas. It defined Wikipedia, and still does, a decade after that work surpassed anything built by small groups of experts. For me, we make software by slowly synthesizing the most accurate knowledge, much as we make Wikipedia articles. -Licensing and Ownership ------------------------ +## Licensing and Ownership + +--- > The project SHALL use a share-alike license such as the MPLv2, or a GPLv3 variant thereof (GPL, LGPL, AGPL). @@ -99,14 +104,15 @@ This removes the need for any specific license or contribution agreement for pat > All patches are owned by their authors. There SHALL NOT be any copyright assignment process. -Here we come to the key reason people trust their investments in ZeroMQ: it's logistically impossible to buy the copyrights to create a closed source competitor to ZeroMQ. iMatix can't do this either. And the more people that send patches, the harder it becomes. 
ZeroMQ isn't just free and open today--this specific rule means it will remain so forever. Note that it's not the case in all MPLv2/GPL projects, many of which still ask for copyright transfer back to the maintainers. +Here we come to the key reason people trust their investments in ZeroMQ: it's logistically impossible to buy the copyrights to create a closed source competitor to ZeroMQ. iMatix can't do this either. And the more people that send patches, the harder it becomes. ZeroMQ isn't just free and open today—this specific rule means it will remain so forever. Note that it's not the case in all MPLv2/GPL projects, many of which still ask for copyright transfer back to the maintainers. > Each Contributor SHALL be responsible for identifying themselves in the project Contributor list. In other words, the maintainers are not karma accountants. Anyone who wants credit has to claim it themselves. -Patch Requirements ------------------- +## Patch Requirements + +--- In this section, we define the obligations of the contributor: specifically, what constitutes a "valid" patch, so that maintainers have rules they can use to accept or reject patches. @@ -124,7 +130,7 @@ This is just sanity. I've spent time cleaning up other peoples' patches because > A patch MUST adhere to the "Evolution of Public Contracts" guidelines defined below. -Ah, the pain, the pain. I'm not speaking of the time at age eight when I stepped on a plank with a 4-inch nail protruding from it. That was relatively OK. I'm speaking of 2010-2011 when we had multiple parallel releases of ZeroMQ, each with different _incompatible_ APIs or wire protocols. It was an exercise in bad rules, pointlessly enforced, that still hurts us today. The rule was, "If you change the API or protocol, you SHALL create a new major version". Give me the nail through the foot; that hurt less. +Ah, the pain, the pain. I'm not speaking of the time at age eight when I stepped on a plank with a 4-inch nail protruding from it. 
That was relatively OK. I'm speaking of 2010-2011 when we had multiple parallel releases of ZeroMQ, each with different *incompatible* APIs or wire protocols. It was an exercise in bad rules, pointlessly enforced, that still hurts us today. The rule was, "If you change the API or protocol, you SHALL create a new major version". Give me the nail through the foot; that hurt less. One of the big changes we made with C4 was simply to ban, outright, this kind of sanctioned sabotage. Amazingly, it's not even hard. We just don't allow the breaking of existing public contracts, period, unless everyone agrees, in which case no period. As Linus Torvalds famously put it on 23 December 2012, "WE DO NOT BREAK USERSPACE!" @@ -136,7 +142,7 @@ This rule has two effects. The first is that it forces people to make minimal so For cross-platform projects, it is fair to ask that the patch works on the development box used by the contributor. -> * A patch commit message MUST consist of a single short (less than 50 characters) line stating the problem ("Problem: ...") being solved, followed by a blank line and then the proposed solution ("Solution: ..."). +> * A patch commit message MUST consist of a single short (less than 50 characters) line stating the problem ("Problem: ...") being solved, followed by a blank line and then the proposed solution ("Solution: ..."). This is a good format for commit messages that fits into email (the first line becomes the subject, and the rest becomes the email body). @@ -144,8 +150,9 @@ This is a good format for commit messages that fits into email (the first line b Just in case it wasn't clear, we're back to legalese and definitions. -Development Process -------------------- +## Development Process + +--- In this section, we aim to describe the actual development process, step-by-step. @@ -203,7 +210,7 @@ We already said this but it's worth repeating: the role of Maintainer is not to > Maintainers SHALL merge correct patches rapidly. 
-There is a criteria I call _change latency_, which is the round-trip time from identifying a problem to testing a solution. The faster the better. If maintainers cannot respond to pull requests as rapidly as people expect, they're not doing their job (or they need more hands). +There is a criteria I call *change latency*, which is the round-trip time from identifying a problem to testing a solution. The faster the better. If maintainers cannot respond to pull requests as rapidly as people expect, they're not doing their job (or they need more hands). > Maintainers MAY merge incorrect patches from other Contributors with the goals of (a) ending fruitless discussions, (b) capturing toxic patches in the historical record, (c) engaging with the Contributor on improving their patch quality. @@ -215,34 +222,33 @@ In the worst case, patches can wait for weeks, or months, to be accepted. Or the PM is how most projects work, and I believe most projects get it wrong. Let me start by listing the problems PM creates: -* _It tells new contributors, "guilty until proven innocent,"_ which is a negative message that creates negative emotions. Contributors who feel unwelcome will always look for alternatives. Driving away contributors is bad. Making slow, quiet enemies is worse. - -* _It gives maintainers power over new contributors_, which many maintainers abuse. This abuse can be subconscious. Yet it is widespread. Maintainers inherently strive to remain important in their project. If they can keep out potential competitors by delaying and blocking their patches, they will. - -* _It opens the door to discrimination_. One can argue, a project belongs to its maintainers, so they can choose who they want to work with. My response is: projects that are not aggressively inclusive will die, and deserve to die. - -* _It slows down the learning cycle_. Innovation demands rapid experiment-failure-success cycles. Someone identifies a problem or inefficiency in a product. 
Someone proposes a fix. The fix is tested and works or fails. We have learned something new. The faster this cycle happens, the faster and more accurately the project can move. - -* _It gives outsiders the chance to troll the project_. It is a simple as raising an objection to a new patch. "I don't like this code." Discussions over details can use up much more effort than writing code. It is far cheaper to attack a patch than to make one. These economics favor the trolls and punish the honest contributors. - -* _It puts the burden of work on individual contributors_, which is ironic and sad for open source. We want to work together yet we're told to fix our work alone. - +* *It tells new contributors, "guilty until proven innocent,"* which is a negative message that creates negative emotions. Contributors who feel unwelcome will always look for alternatives. Driving away contributors is bad. Making slow, quiet enemies is worse. + +* *It gives maintainers power over new contributors*, which many maintainers abuse. This abuse can be subconscious. Yet it is widespread. Maintainers inherently strive to remain important in their project. If they can keep out potential competitors by delaying and blocking their patches, they will. + +* *It opens the door to discrimination*. One can argue, a project belongs to its maintainers, so they can choose who they want to work with. My response is: projects that are not aggressively inclusive will die, and deserve to die. + +* *It slows down the learning cycle*. Innovation demands rapid experiment-failure-success cycles. Someone identifies a problem or inefficiency in a product. Someone proposes a fix. The fix is tested and works or fails. We have learned something new. The faster this cycle happens, the faster and more accurately the project can move. + +* *It gives outsiders the chance to troll the project*. It is a simple as raising an objection to a new patch. "I don't like this code." 
Discussions over details can use up much more effort than writing code. It is far cheaper to attack a patch than to make one. These economics favor the trolls and punish the honest contributors. + +* *It puts the burden of work on individual contributors*, which is ironic and sad for open source. We want to work together yet we're told to fix our work alone. Now let's see how this works when we use Optimistic Merging, or OM. To start with, understand that not all patches nor all contributors are the same. We see at least four main cases in our open source projects: -1. Good contributors who know the rules and write excellent, perfect patches. -2. Good contributors who make mistakes, and who write useful yet broken patches. -3. Mediocre contributors who make patches that no-one notices or cares about. -4. Trollish contributors who ignore the rules, and who write toxic patches. +1. Good contributors who know the rules and write excellent, perfect patches. +2. Good contributors who make mistakes, and who write useful yet broken patches. +3. Mediocre contributors who make patches that no-one notices or cares about. +4. Trollish contributors who ignore the rules, and who write toxic patches. PM assumes all patches are toxic until proven good (4). Whereas in reality most patches tend to be useful, and worth improving (2). Let's see how each scenario works, with PM and OM: -1. PM: depending on unspecified, arbitrary criteria, patch may be merged rapidly or slowly. At least sometimes, a good contributor will be left with bad feelings. OM: good contributors feel happy and appreciated, and continue to provide excellent patches until they are done using the project. -2. PM: contributor retreats, fixes patch, comes back somewhat humiliated. OM: second contributor joins in to help first fix their patch. We get a short, happy patch party. New contributor now has a coach and friend in the project. -3. PM: we get a flamewar and everyone wonders why the community is so hostile. 
OM: the mediocre contributor is largely ignored. If patch needs fixing, it'll happen rapidly. Contributor loses interest and eventually the patch is reverted. -4. PM: we get a flamewar which troll wins by sheer force of argument. Community explodes in fight-or-flee emotions. Bad patches get pushed through. OM: existing contributor immediately reverts the patch. There is no discussion. Troll may try again, and eventually may be banned. Toxic patches remain in git history forever. +1. PM: depending on unspecified, arbitrary criteria, patch may be merged rapidly or slowly. At least sometimes, a good contributor will be left with bad feelings. OM: good contributors feel happy and appreciated, and continue to provide excellent patches until they are done using the project. +2. PM: contributor retreats, fixes patch, comes back somewhat humiliated. OM: second contributor joins in to help first fix their patch. We get a short, happy patch party. New contributor now has a coach and friend in the project. +3. PM: we get a flamewar and everyone wonders why the community is so hostile. OM: the mediocre contributor is largely ignored. If patch needs fixing, it'll happen rapidly. Contributor loses interest and eventually the patch is reverted. +4. PM: we get a flamewar which troll wins by sheer force of argument. Community explodes in fight-or-flee emotions. Bad patches get pushed through. OM: existing contributor immediately reverts the patch. There is no discussion. Troll may try again, and eventually may be banned. Toxic patches remain in git history forever. In each case, OM has a better outcome than PM. @@ -260,8 +266,9 @@ In essence, the goal here is to allow users to try patches rather than to spend Just keep the issue tracker clean. -Branches and Releases ---------------------- +## Branches and Releases + +--- When C4 is working, we get two massive simplifications of our delivery process. One, we don't need or use branches. Two, we deliver from master. 
@@ -277,8 +284,9 @@ I'll come to branches soon. In short (or "tl;dr", as they say on the webs), bran > To make a stable release a Maintainer shall tag the repository. Stable releases SHALL always be released from the repository master. -Evolution of Public Contracts ------------------------------ +## Evolution of Public Contracts + +--- By "public contracts", I mean APIs and protocols. Up until the end of 2011, libzmq's naturally happy state was marred by broken promises and broken contracts. We stopped making promises (aka "road maps") for libzmq completely, and our dominant theory of change is now that it emerges carefully and accurately over time. At a 2012 Chicago meetup, Garrett Smith and Chuck Remes called this the "drunken stumble to greatness", which is how I think of it now. @@ -290,7 +298,7 @@ You'd think this was a given for professional software engineers but no, it's no > All Public Contracts SHOULD have space for extensibility and experimentation. -Now, the real thing is that public contracts _do change_. It's not about not changing them. It's about changing them safely. This means educating (especially protocol) designers to create that space up-front. +Now, the real thing is that public contracts *do change*. It's not about not changing them. It's about changing them safely. This means educating (especially protocol) designers to create that space up-front. > A patch that modifies a stable Public Contract SHOULD not break existing applications unless there is overriding consensus on the value of doing this. @@ -298,16 +306,16 @@ Sometimes the patch is fixing a bad API that no one is using. It's a freedom we > A patch that introduces new features SHOULD do so using new names (a new contract). -We had the experience in ZeroMQ once or twice of new features using old names (or worse, using names that were _still in use_ elsewhere). ZeroMQ v3.0 had a newly introduced "ROUTER" socket that was totally different from the existing ROUTER socket in 2.x. 
Dear lord, you should be face-palming, why? The reason: apparently, even smart people sometimes need regulation to stop them doing silly things. +We had the experience in ZeroMQ once or twice of new features using old names (or worse, using names that were *still in use* elsewhere). ZeroMQ v3.0 had a newly introduced "ROUTER" socket that was totally different from the existing ROUTER socket in 2.x. Dear lord, you should be face-palming, why? The reason: apparently, even smart people sometimes need regulation to stop them doing silly things. > New contracts SHOULD be marked as "draft" until they are stable and used by real users. -> +> > Old contracts SHOULD be deprecated in a systematic fashion by marking new contracts as "draft" until they are stable, then marking the old contracts as "deprecated". This life cycle notation has the great benefit of actually telling users what is going on with a consistent direction. "Draft" means "we have introduced this and intend to make it stable if it works". It does not mean, "we have introduced this and will remove it at any time if we feel like it". One assumes that code that survives more than one patch cycle is meant to be there. "Deprecated" means "we have replaced this and intend to remove it". > Old contracts SHOULD be deprecated in a systematic fashion by marking them as "deprecated" and replacing them with new contracts as needed. -> +> > When sufficient time has passed, old deprecated contracts SHOULD be removed. In theory this gives applications time to move onto stable new contracts without risk. You can upgrade first, make sure things work, and then, over time, fix things up to remove dependencies on deprecated and legacy APIs and protocols. 
@@ -316,8 +324,9 @@ In theory this gives applications time to move onto stable new contracts without Ah, yes, the joy when ZeroMQ v3.x renamed the top-used API functions (`zmq_send\[3\]` and `zmq_recv\[3\]`) and then recycled the old names for new methods that were utterly incompatible (and which I suspect few people actually use). You should be slapping yourself in confusion again, but really, this is what happened and I was as guilty as anyone. After all, we did change the version number! The only benefit of that experience was to get this rule. -Project Administration ----------------------- +## Project Administration + +--- > The project founders SHALL act as Administrators to manage the set of project Maintainers. @@ -339,11 +348,12 @@ This was Ian Barber's suggestion: we need a way to crop inactive maintainers. Or Now and then, your projects will attract people of the wrong character. You will get better at seeing these people, over time. C4 helps in two ways. One, by setting out strong rules, it discourages the chaos-seekers and bullies, who cannot tolerate others' rules. Two, it gives you the Administrator the power to ban them. I like to give such people time, to show themselves, and get their patches on the public record (a reason to merge bad patches, which of course you can remove after a suitable pause). +## License and Further Reading -License and Further Reading ======= + The C4, along with this breakdown of it, was created (primarily) by the late Pieter Hintjens. If the C4 interests you it's a very good idea to check out Pieter's blog and watch his videos to get a deep understanding of his research into this field. 
Here's a good one to start with: [How Conway's law is eating your job.](https://www.youtube.com/watch?v=7HECD3eLoVo) -The text on this page is Copyright (c) 2007-2014 iMatix Corporation and Contributors, 2017-2018 Blockrazor and Contributors and is released under [CC-BY-SA-4](https://creativecommons.org/licenses/by/4.0/) +The text on this page is Copyright ©️ 2007-2014 iMatix Corporation and Contributors, 2017-2018 Blockrazor and Contributors and is released under [CC-BY-SA-4](https://creativecommons.org/licenses/by/4.0/) -This breakdown and description of the C4 is an excerpt from [chapter 6 of _ØMQ - The Guide_](http://zguide.zeromq.org/page:all#The-ZeroMQ-Process-C), which is maintained at [booksbyus/zguide on github](https://github.com/booksbyus/zguide/blob/master/chapter6.txt). +This breakdown and description of the C4 is an excerpt from [chapter 6 of _ØMQ ― The Guide_](http://zguide.zeromq.org/page:all#The-ZeroMQ-Process-C), which is maintained at [booksbyus/zguide on github](https://github.com/booksbyus/zguide/blob/master/chapter6.txt). 
diff --git a/hasura/clear-xp.mjs b/hasura/clear-xp.mjs new file mode 100755 index 0000000000..f2fd4f4af4 --- /dev/null +++ b/hasura/clear-xp.mjs @@ -0,0 +1,81 @@ +#!/usr/bin/env node + +import fetch from 'node-fetch' + +/* eslint-disable no-console */ + +const TARGET_GRAPHQL_URL = ( + process.env.TARGET_GRAPHQL_URL || 'http://localhost:8080/v1/graphql' +) +const HASURA_ADMIN_SECRET = ( + process.env.HASURA_ADMIN_SECRET || 'metagame_secret' +) + +const headers = { + 'content-type': 'application/json', + 'x-hasura-admin-secret': HASURA_ADMIN_SECRET, +} + +async function fetchGraphQL({ + url = TARGET_GRAPHQL_URL, opDoc, opName = null, variables = {} +}) { + const regex = /^\s*(query|mutation)\s+(\S+)\s*\{.*/s + opName ??= opDoc.replace(regex, '$2') + const result = await fetch(url, { + method: 'POST', + body: JSON.stringify({ + query: opDoc, + variables, + operationName: opName, + }), + headers, + }) + + const body = await result.text() + try { + return JSON.parse(body) + } catch(err) { + console.error(`JSON Error: ${err.message}`) + console.error(body) + throw err + } +} + +const clearBalancesMutation = /* GraphQL */` + mutation ClearBalances { + delete_balance(where: {}) { + affected_rows + } + } +`.trim() + +async function clearBalances() { + const { data } = await fetchGraphQL({ + opDoc: clearBalancesMutation, + }) + return data.delete_balance.affected_rows +} + +const resetOffsetsMutation = /* GraphQL */` + mutation ResetOffsets { + update_token(where: {}, _set: { lastBlockHeight: 42746520 }) { + affected_rows + } + } +`.trim() + +async function resetOffsets() { + const { data, errors } = await fetchGraphQL({ + opDoc: resetOffsetsMutation, + }) + if(!!errors) throw errors[0] + return data.update_token.affected_rows +} + +console.info(`Resetting the XP system for all guilds on ${TARGET_GRAPHQL_URL}…`) + +const numReset = await resetOffsets() +console.debug(`Reset ${numReset} guilds.`) + +const numCleared = await clearBalances() +console.debug(`Removed 
${numCleared} balances.`) diff --git a/hasura/metadata/actions.graphql b/hasura/metadata/actions.graphql index 7dcdddbd38..0462fd232b 100644 --- a/hasura/metadata/actions.graphql +++ b/hasura/metadata/actions.graphql @@ -38,6 +38,10 @@ type Mutation { syncAllGuildDiscordMembers: [DiscordGuildsSyncOutput] } +type Mutation { + syncBalances: SyncBalancesOutput! +} + type Mutation { syncSourceCredAccounts: SourceCredSyncOutput } @@ -215,3 +219,41 @@ type UpdateComposeDBProfileResponse { fields: [String] } +type TokenReturn { + added: [BalanceOutput] + oldHeight: Int + newHeight: Int + count: Int + multiplier: Float + players: [PlayerOutput] +} + +type SyncBalancesOutput { + seasonStart: timestamp + success: Boolean + message: String + tokenReturns: [TokenReturn] +} + +type PlayerOutput { + address: String + xp: XPOutput +} + +type XPOutput { + initial: Float + accumulated: Float + calculated: Float + seasonal: Float +} + +type BalanceOutput { + executedAt: timestamp + drops: [DropOutput] +} + +type DropOutput { + playerAddress: String + amount: Float +} + diff --git a/hasura/metadata/actions.yaml b/hasura/metadata/actions.yaml index bafbd91335..756c6207ea 100644 --- a/hasura/metadata/actions.yaml +++ b/hasura/metadata/actions.yaml @@ -46,6 +46,10 @@ actions: definition: kind: synchronous handler: '{{ACTION_BASE_ENDPOINT}}/syncAllGuildDiscordMembers' + - name: syncBalances + definition: + kind: synchronous + handler: '{{ACTION_BASE_ENDPOINT}}/syncBalances' - name: syncSourceCredAccounts definition: kind: synchronous @@ -154,4 +158,10 @@ custom_types: - name: DiscordGuildsSyncOutput - name: LinkCeramicProfileNodeResponse - name: UpdateComposeDBProfileResponse + - name: TokenReturn + - name: SyncBalancesOutput + - name: PlayerOutput + - name: XPOutput + - name: BalanceOutput + - name: DropOutput scalars: [] diff --git a/hasura/metadata/tables.yaml b/hasura/metadata/tables.yaml index c65d62f4ed..f1f057c260 100644 --- a/hasura/metadata/tables.yaml +++ 
b/hasura/metadata/tables.yaml @@ -1470,17 +1470,23 @@ column_config: chain_id: custom_name: chainId + created_at: + custom_name: createdAt guild_id: custom_name: guildId - last_offset: - custom_name: lastOffset + last_block_height: + custom_name: lastBlockHeight safe_address: custom_name: safeAddress + updated_at: + custom_name: updatedAt custom_column_names: chain_id: chainId + created_at: createdAt guild_id: guildId - last_offset: lastOffset + last_block_height: lastBlockHeight safe_address: safeAddress + updated_at: updatedAt custom_root_fields: {} object_relationships: - name: guild @@ -1499,16 +1505,22 @@ schema: public configuration: column_config: + created_at: + custom_name: createdAt player_id: custom_name: playerId seasonal_balance: custom_name: seasonalBalance token_address: custom_name: tokenAddress + updated_at: + custom_name: updatedAt custom_column_names: + created_at: createdAt player_id: playerId seasonal_balance: seasonalBalance token_address: tokenAddress + updated_at: updatedAt custom_root_fields: {} object_relationships: - name: player diff --git a/hasura/migrations/1692017802834_alter_table_public_player_account_add_unique_type_player_id/up.sql b/hasura/migrations/1692017802834_alter_table_public_player_account_add_unique_type_player_id/up.sql index b33d8f3cdc..cd94124d70 100644 --- a/hasura/migrations/1692017802834_alter_table_public_player_account_add_unique_type_player_id/up.sql +++ b/hasura/migrations/1692017802834_alter_table_public_player_account_add_unique_type_player_id/up.sql @@ -1,2 +1,11 @@ -alter table "public"."player_account" drop constraint "Account_identifier_type_key"; -alter table "public"."player_account" add constraint "player_account_type_player_id_key" unique ("type", "player_id"); +ALTER TABLE public.player_account + DROP CONSTRAINT "Account_identifier_type_key" +; + +ALTER TABLE public.player_account + DROP CONSTRAINT IF EXISTS player_account_type_player_id_key +; +ALTER TABLE public.player_account + ADD CONSTRAINT 
player_account_type_player_id_key + UNIQUE ("type", "player_id") +; diff --git a/hasura/migrations/1694534639232_insert_seed_for_metafam/up.sql b/hasura/migrations/1694534639232_insert_seed_for_metafam/up.sql index 8526363151..be09f262b3 100644 --- a/hasura/migrations/1694534639232_insert_seed_for_metafam/up.sql +++ b/hasura/migrations/1694534639232_insert_seed_for_metafam/up.sql @@ -1 +1,13 @@ -INSERT INTO xp(initial, player_id, token_address, balance) (SELECT total_xp, id, '0xEAeCC18198a475c921B24b8A6c1C1f0f5F3F7EA0', 0 FROM player); +INSERT INTO xp( + initial, + player_id, + token_address, + balance +) ( + SELECT + total_xp, + id, + '0xEAeCC18198a475c921B24b8A6c1C1f0f5F3F7EA0', + 0 + FROM player +); diff --git a/hasura/migrations/1694790536010_set_fk_public_link_type/down.sql b/hasura/migrations/1694790536010_set_fk_public_link_type/down.sql index b0b011d433..9d15bd9ce0 100644 --- a/hasura/migrations/1694790536010_set_fk_public_link_type/down.sql +++ b/hasura/migrations/1694790536010_set_fk_public_link_type/down.sql @@ -1 +1 @@ -alter table "public"."link" drop constraint "link_type_fkey"; +ALTER TABLE public.link DROP CONSTRAINT link_type_fkey; diff --git a/hasura/migrations/1695821615820_copy_guild_urls_into_link_table/down.sql b/hasura/migrations/1695821615820_copy_guild_urls_into_link_table/down.sql index 5967bd5392..53cfdbf9ea 100644 --- a/hasura/migrations/1695821615820_copy_guild_urls_into_link_table/down.sql +++ b/hasura/migrations/1695821615820_copy_guild_urls_into_link_table/down.sql @@ -1 +1 @@ -DELETE FROM link \ No newline at end of file +DELETE FROM link; diff --git a/hasura/migrations/1695821815342_alter_table_public_guild_drop_column_discord_invite_url/down.sql b/hasura/migrations/1695821815342_alter_table_public_guild_drop_column_discord_invite_url/down.sql index 0f4ff86cbb..7e660a1763 100644 --- a/hasura/migrations/1695821815342_alter_table_public_guild_drop_column_discord_invite_url/down.sql +++ 
b/hasura/migrations/1695821815342_alter_table_public_guild_drop_column_discord_invite_url/down.sql @@ -1,2 +1,2 @@ -alter table "public"."guild" alter column "discord_invite_url" drop not null; -alter table "public"."guild" add column "discord_invite_url" text; +ALTER TABLE public.guild ALTER COLUMN discord_invite_url DROP NOT NULL; +ALTER TABLE public.guild ADD COLUMN discord_invite_url text; diff --git a/hasura/migrations/1695821839485_alter_table_public_guild_drop_column_github_url/down.sql b/hasura/migrations/1695821839485_alter_table_public_guild_drop_column_github_url/down.sql index 141260651a..c0bfedd5d6 100644 --- a/hasura/migrations/1695821839485_alter_table_public_guild_drop_column_github_url/down.sql +++ b/hasura/migrations/1695821839485_alter_table_public_guild_drop_column_github_url/down.sql @@ -1,2 +1,2 @@ -alter table "public"."guild" alter column "github_url" drop not null; -alter table "public"."guild" add column "github_url" text; +ALTER TABLE public.guild ALTER COLUMN github_url DROP NOT NULL; +ALTER TABLE public.guild ADD COLUMN github_url text; diff --git a/hasura/migrations/1695821839485_alter_table_public_guild_drop_column_github_url/up.sql b/hasura/migrations/1695821839485_alter_table_public_guild_drop_column_github_url/up.sql index cf7ab21725..7cc986d549 100644 --- a/hasura/migrations/1695821839485_alter_table_public_guild_drop_column_github_url/up.sql +++ b/hasura/migrations/1695821839485_alter_table_public_guild_drop_column_github_url/up.sql @@ -1 +1 @@ -alter table "public"."guild" drop column "github_url" cascade; +ALTER TABLE public.guild DROP COLUMN github_url CASCADE; diff --git a/hasura/migrations/1695821850665_alter_table_public_guild_drop_column_twitter_url/down.sql b/hasura/migrations/1695821850665_alter_table_public_guild_drop_column_twitter_url/down.sql index e8b443960c..b1b470098c 100644 --- a/hasura/migrations/1695821850665_alter_table_public_guild_drop_column_twitter_url/down.sql +++ 
b/hasura/migrations/1695821850665_alter_table_public_guild_drop_column_twitter_url/down.sql @@ -1,2 +1,2 @@ -alter table "public"."guild" alter column "twitter_url" drop not null; -alter table "public"."guild" add column "twitter_url" text; +ALTER TABLE public.guild ALTER COLUMN twitter_url DROP NOT NULL; +ALTER TABLE public.guild ADD COLUMN twitter_url text; diff --git a/hasura/migrations/1695829101657_alter_table_public_guild_metadata_alter_column_discord_id/down.sql b/hasura/migrations/1695829101657_alter_table_public_guild_metadata_alter_column_discord_id/down.sql index f69e37a2b6..24a617b139 100644 --- a/hasura/migrations/1695829101657_alter_table_public_guild_metadata_alter_column_discord_id/down.sql +++ b/hasura/migrations/1695829101657_alter_table_public_guild_metadata_alter_column_discord_id/down.sql @@ -1 +1 @@ -alter table "public"."guild_metadata" alter column "discord_id" set not null; +ALTER TABLE public.guild_metadata ALTER COLUMN discord_id SET NOT NULL; diff --git a/hasura/migrations/1696771380738_alter_table_public_guild_add_column_updated_at/down.sql b/hasura/migrations/1696771380738_alter_table_public_guild_add_column_updated_at/down.sql index bd1b7189b5..9a26dacdf9 100644 --- a/hasura/migrations/1696771380738_alter_table_public_guild_add_column_updated_at/down.sql +++ b/hasura/migrations/1696771380738_alter_table_public_guild_add_column_updated_at/down.sql @@ -1,4 +1 @@ --- Could not auto-generate a down migration. 
--- Please write an appropriate down migration for the SQL below: --- alter table "public"."guild" add column "updated_at" timestamptz --- null default now(); +ALTER TABLE public.guild DROP COLUMN updated_at CASCADE; diff --git a/hasura/migrations/1696771380738_alter_table_public_guild_add_column_updated_at/up.sql b/hasura/migrations/1696771380738_alter_table_public_guild_add_column_updated_at/up.sql index 22aa64dcf5..c051702d20 100644 --- a/hasura/migrations/1696771380738_alter_table_public_guild_add_column_updated_at/up.sql +++ b/hasura/migrations/1696771380738_alter_table_public_guild_add_column_updated_at/up.sql @@ -1,2 +1,4 @@ -alter table "public"."guild" add column "updated_at" timestamptz - null default now(); +ALTER TABLE public.guild + ADD COLUMN updated_at timestamptz + NULL DEFAULT now() +; diff --git a/hasura/migrations/1697476299259_alter_table_public_token_add_column_multiplier/down.sql b/hasura/migrations/1697476299259_alter_table_public_token_add_column_multiplier/down.sql new file mode 100644 index 0000000000..a1e0a4baac --- /dev/null +++ b/hasura/migrations/1697476299259_alter_table_public_token_add_column_multiplier/down.sql @@ -0,0 +1 @@ +ALTER TABLE public.token DROP COLUMN multiplier CASCADE; diff --git a/hasura/migrations/1697476299259_alter_table_public_token_add_column_multiplier/up.sql b/hasura/migrations/1697476299259_alter_table_public_token_add_column_multiplier/up.sql new file mode 100644 index 0000000000..b3b66fd87d --- /dev/null +++ b/hasura/migrations/1697476299259_alter_table_public_token_add_column_multiplier/up.sql @@ -0,0 +1,2 @@ +alter table "public"."token" add column "multiplier" float8 + not null default '1'; diff --git a/hasura/migrations/1697476304617_alter_table_public_token_add_column_created_at/down.sql b/hasura/migrations/1697476304617_alter_table_public_token_add_column_created_at/down.sql new file mode 100644 index 0000000000..6d55713535 --- /dev/null +++ 
b/hasura/migrations/1697476304617_alter_table_public_token_add_column_created_at/down.sql @@ -0,0 +1 @@ +ALTER TABLE public.token DROP COLUMN created_at CASCADE; diff --git a/hasura/migrations/1697476304617_alter_table_public_token_add_column_created_at/up.sql b/hasura/migrations/1697476304617_alter_table_public_token_add_column_created_at/up.sql new file mode 100644 index 0000000000..ab894f5ab3 --- /dev/null +++ b/hasura/migrations/1697476304617_alter_table_public_token_add_column_created_at/up.sql @@ -0,0 +1,4 @@ +ALTER TABLE public.token + ADD COLUMN created_at timestamptz + NULL DEFAULT now() +; diff --git a/hasura/migrations/1697476310113_alter_table_public_token_add_column_updated_at/down.sql b/hasura/migrations/1697476310113_alter_table_public_token_add_column_updated_at/down.sql new file mode 100644 index 0000000000..9c4bc10cfc --- /dev/null +++ b/hasura/migrations/1697476310113_alter_table_public_token_add_column_updated_at/down.sql @@ -0,0 +1,4 @@ +ALTER TABLE public.token DROP COLUMN updated_at CASCADE; +DROP TRIGGER IF EXISTS set_current_timestamp_updated_at + ON public.token +; diff --git a/hasura/migrations/1697476310113_alter_table_public_token_add_column_updated_at/up.sql b/hasura/migrations/1697476310113_alter_table_public_token_add_column_updated_at/up.sql new file mode 100644 index 0000000000..7cfff3f7f9 --- /dev/null +++ b/hasura/migrations/1697476310113_alter_table_public_token_add_column_updated_at/up.sql @@ -0,0 +1,21 @@ +ALTER TABLE public.token + ADD COLUMN updated_at timestamptz + null default now() +; + +CREATE OR REPLACE FUNCTION public.set_current_timestamp_updated_at() +RETURNS TRIGGER AS $$ +DECLARE + _new record; +BEGIN + _new := NEW; + _new.updated_at = NOW(); + RETURN _new; +END; +$$ LANGUAGE plpgsql; +CREATE TRIGGER set_public_token_updated_at +BEFORE UPDATE ON public.token +FOR EACH ROW +EXECUTE PROCEDURE public.set_current_timestamp_updated_at(); +COMMENT ON TRIGGER set_public_token_updated_at ON public.token +IS 'trigger to set 
value of column `updated_at` to current timestamp on row update'; diff --git a/hasura/migrations/1697478812713_alter_table_public_xp_add_column_created_at/down.sql b/hasura/migrations/1697478812713_alter_table_public_xp_add_column_created_at/down.sql new file mode 100644 index 0000000000..341c04cc57 --- /dev/null +++ b/hasura/migrations/1697478812713_alter_table_public_xp_add_column_created_at/down.sql @@ -0,0 +1 @@ +ALTER TABLE public.xp DROP COLUMN created_at CASCADE; diff --git a/hasura/migrations/1697478812713_alter_table_public_xp_add_column_created_at/up.sql b/hasura/migrations/1697478812713_alter_table_public_xp_add_column_created_at/up.sql new file mode 100644 index 0000000000..df6a9a4854 --- /dev/null +++ b/hasura/migrations/1697478812713_alter_table_public_xp_add_column_created_at/up.sql @@ -0,0 +1,4 @@ +ALTER TABLE public.xp + ADD COLUMN created_at timestamptz + NOT NULL DEFAULT now() +; diff --git a/hasura/migrations/1697478824592_alter_table_public_xp_add_column_updated_at/down.sql b/hasura/migrations/1697478824592_alter_table_public_xp_add_column_updated_at/down.sql new file mode 100644 index 0000000000..47d5690ebb --- /dev/null +++ b/hasura/migrations/1697478824592_alter_table_public_xp_add_column_updated_at/down.sql @@ -0,0 +1,4 @@ +ALTER TABLE public.xp DROP COLUMN updated_at CASCADE; +DROP TRIGGER IF EXISTS set_current_timestamp_updated_at + ON public.xp +; diff --git a/hasura/migrations/1697478824592_alter_table_public_xp_add_column_updated_at/up.sql b/hasura/migrations/1697478824592_alter_table_public_xp_add_column_updated_at/up.sql new file mode 100644 index 0000000000..f3dfcb0fec --- /dev/null +++ b/hasura/migrations/1697478824592_alter_table_public_xp_add_column_updated_at/up.sql @@ -0,0 +1,19 @@ +alter table "public"."xp" add column "updated_at" timestamptz + not null default now(); + +CREATE OR REPLACE FUNCTION "public"."set_current_timestamp_updated_at"() +RETURNS TRIGGER AS $$ +DECLARE + _new record; +BEGIN + _new := NEW; + 
_new."updated_at" = NOW(); + RETURN _new; +END; +$$ LANGUAGE plpgsql; +CREATE TRIGGER "set_public_xp_updated_at" +BEFORE UPDATE ON "public"."xp" +FOR EACH ROW +EXECUTE PROCEDURE "public"."set_current_timestamp_updated_at"(); +COMMENT ON TRIGGER "set_public_xp_updated_at" ON "public"."xp" +IS 'trigger to set value of column "updated_at" to current timestamp on row update'; diff --git a/hasura/migrations/1698081953289_alter_table_public_token_alter_column_last_offset/down.sql b/hasura/migrations/1698081953289_alter_table_public_token_alter_column_last_offset/down.sql new file mode 100644 index 0000000000..b1e0f147df --- /dev/null +++ b/hasura/migrations/1698081953289_alter_table_public_token_alter_column_last_offset/down.sql @@ -0,0 +1 @@ +alter table "public"."token" rename column "last_block_height" to "last_offset"; diff --git a/hasura/migrations/1698081953289_alter_table_public_token_alter_column_last_offset/up.sql b/hasura/migrations/1698081953289_alter_table_public_token_alter_column_last_offset/up.sql new file mode 100644 index 0000000000..511bed1d73 --- /dev/null +++ b/hasura/migrations/1698081953289_alter_table_public_token_alter_column_last_offset/up.sql @@ -0,0 +1 @@ +alter table "public"."token" rename column "last_offset" to "last_block_height"; diff --git a/hasura/migrations/1698237245255_insert_into_public_QuestStatus/down.sql b/hasura/migrations/1698237245255_insert_into_public_QuestStatus/down.sql new file mode 100644 index 0000000000..915ce95c2d --- /dev/null +++ b/hasura/migrations/1698237245255_insert_into_public_QuestStatus/down.sql @@ -0,0 +1 @@ +DELETE FROM "public"."QuestStatus" WHERE "status" = 'ARCHIVED'; diff --git a/hasura/migrations/1698237245255_insert_into_public_QuestStatus/up.sql b/hasura/migrations/1698237245255_insert_into_public_QuestStatus/up.sql new file mode 100644 index 0000000000..36e642a8b9 --- /dev/null +++ b/hasura/migrations/1698237245255_insert_into_public_QuestStatus/up.sql @@ -0,0 +1 @@ +INSERT INTO 
"public"."QuestStatus"("status") VALUES (E'ARCHIVED'); diff --git a/package.json b/package.json index 6126e139f4..e79d8081a3 100644 --- a/package.json +++ b/package.json @@ -13,7 +13,7 @@ "docker:build": "docker-compose up --build -d", "docker:stop": "docker-compose down", "docker:clean": "docker-compose down -v", - "docker:dev": "COMPOSE_DOCKER_CLI_BUILD=1 DOCKER_BUILDKIT=1 docker-compose up --build", + "docker:dev": "DOCKER_BUILDKIT=1 docker compose up --build", "build": "lerna run --concurrency 1 --stream build", "web:dev": "lerna run dev --parallel --scope @metafam/web --include-dependencies", "web:build": "lerna run build --scope @metafam/web --include-dependencies --stream", @@ -63,12 +63,13 @@ "@graphql-codegen/typescript-resolvers": "^4.0.1", "@graphql-codegen/typescript-urql": "^3.7.3", "@types/jest": "^29.2.1", + "@types/node": "^20.8.6", "@types/node-fetch": "^2.6.2", "@types/react": "^18.0.21", "@types/react-dom": "^18.0.6", "@types/uuid": "8.3.4", "@typescript-eslint/eslint-plugin": "5.45.0", - "@typescript-eslint/parser": "5.45.0", + "@typescript-eslint/parser": "^6.6.0", "caniuse-lite": "^1.0.30001383", "concurrently": "7.0.0", "env-cmd": "10.1.0", @@ -101,7 +102,9 @@ "ethers": "5.7.0", "graphql": "16.8.0", "multihashes": "4.0.3", - "@urql/core": "3.2.2" + "@urql/core": "3.2.2", + "node-gyp": "10.0.1", + "better-sqlite3": "9.0.0" }, "browserslist": [ "defaults", diff --git a/packages/backend/src/handlers/actions/player/syncBalances.ts b/packages/backend/src/handlers/actions/player/syncBalances.ts index 6366931d06..013291474d 100644 --- a/packages/backend/src/handlers/actions/player/syncBalances.ts +++ b/packages/backend/src/handlers/actions/player/syncBalances.ts @@ -1,4 +1,4 @@ -import { getCurrentSeasonStart } from '@metafam/utils'; +import { getCurrentSeasonStart, Maybe } from '@metafam/utils'; import ethers from 'ethers'; import { Request, Response } from 'express'; import fetch from 'node-fetch'; @@ -6,11 +6,11 @@ import fetch from 'node-fetch'; 
import { client } from '../../../lib/hasuraClient.js'; import { computeRank } from '../../../lib/rankHelpers.js'; -const INVALIDATE_AFTER_DAYS = 4; // number of days after which to recache - type SafeResponse = { + next: Maybe; results: Array<{ origin: string; + blockNumber: number; executionDate: string; transfers: Array<{ to: string; @@ -23,113 +23,193 @@ type SafeResponse = { // @todo return balance of token of player in guild const setBalances = async ({ safeAddress, - offset = 0, + lastBlockHeight = 0, tokenAddress: guildTokenAddress, + chainId = 1, }: { safeAddress: string; - offset?: number; + lastBlockHeight?: number; tokenAddress: string; + chainId: number; }) => { - const res = await fetch( - `https://safe-transaction-polygon.safe.global/api/v1/safes/${safeAddress}/all-transactions/?limit=100&offset=${offset}&executed=true&queued=false&trusted=true`, - ); - - const { results } = (await res.json()) as SafeResponse; + let safeURL; + let minBlockHeight; + let maxBlockHeight; const uniqueDrops: Record> = {}; - const airdrops = results.filter((tx) => tx.origin?.includes('CSV Airdrop')); + do { + const safes = { + 137: + 'https://safe-transaction-polygon.safe.global' + + `/api/v1/safes/${safeAddress}/all-transactions/` + + `?limit=10&executed=true` + + '&queued=false&trusted=true', + }; + safeURL ??= safes[chainId as keyof typeof safes]; + if (!safeURL) { + throw new Error(`No Safe URL for chain #${chainId}.`); + } + + // eslint-disable-next-line no-await-in-loop + const res = await fetch(safeURL); + // eslint-disable-next-line no-await-in-loop + const { next, results } = (await res.json()) as SafeResponse; + safeURL = next; + const heights = results.map(({ blockNumber }) => blockNumber); + minBlockHeight = Math.min(minBlockHeight ?? Infinity, ...heights); + maxBlockHeight = Math.max(maxBlockHeight ?? 
0, ...heights); + const airdrops = results.filter((tx) => tx.origin?.includes('CSV Airdrop')); + + airdrops.forEach(({ blockNumber, executionDate, transfers }) => { + if (blockNumber <= lastBlockHeight) return; - airdrops.forEach(({ executionDate, transfers }) => { - transfers?.forEach(({ to, tokenAddress, value }) => { - uniqueDrops[executionDate] ??= {}; - uniqueDrops[executionDate][to] ??= 0; - if (tokenAddress === guildTokenAddress) { - uniqueDrops[executionDate][to] += Number( - ethers.utils.formatEther(value), - ); - } + transfers?.forEach(({ to, tokenAddress, value }) => { + uniqueDrops[executionDate] ??= {}; + uniqueDrops[executionDate][to] ??= 0; + if (tokenAddress === guildTokenAddress) { + uniqueDrops[executionDate][to] += Number( + ethers.utils.formatEther(value), + ); + } + }); }); - }); + } while (!!safeURL && minBlockHeight > lastBlockHeight); - await Promise.all( - Object.entries(uniqueDrops) - .map(([executionDate, drops]) => + const added = await Promise.all( + Object.entries(uniqueDrops).map(async ([executedAt, drops]) => { + const dropsReturned = await Promise.all( Object.entries(drops).map(async ([to, value]) => { - await client.AddBalance({ - amount: value, - executedAt: new Date(executionDate), + const entry = { playerAddress: to, + amount: value, + }; + await client.AddBalance({ + ...entry, + executedAt: new Date(executedAt), tokenAddress: guildTokenAddress, }); + return entry; }), - ) - .flat(), + ); + return { + executedAt: new Date(executedAt).toLocaleString('sv').replace(' ', '@'), + drops: dropsReturned, + }; + }), ); - await client.UpdateLastOffset({ + await client.UpdateLastBlockHeight({ tokenAddress: guildTokenAddress, - offset: offset + results.length, + height: maxBlockHeight, }); + + return { + added, + oldHeight: lastBlockHeight, + newHeight: maxBlockHeight, + }; }; // @todo only query guilds that have a token ID export default async (req: Request, res: Response): Promise => { - const expiration = new Date(); - const 
invalidateAfterDays = - req.query.invalidate_after_days != null - ? parseInt(req.query.invalidate_after_days as string, 10) - : INVALIDATE_AFTER_DAYS; - expiration.setDate(expiration.getDate() - invalidateAfterDays); - const { token: tokens } = await client.GetTokens(); - await Promise.allSettled( - tokens.map( - async ({ safeAddress, lastOffset: offset, guildId, address }) => { - await setBalances({ safeAddress, offset, tokenAddress: address }); - const { - guild: [{ guild_players: players }], - } = await client.GetGuildMembers({ id: guildId }); - await Promise.all( - players.map(async (player) => { - const total = await client.GetTotalForPlayer({ - tokenAddress: address, - playerAddress: player.Player.ethereumAddress, - }); - const balance = total.balance_aggregate.aggregate?.sum?.amount; + try { + const { token: tokens } = await client.GetTokens(); + const seasonStart = getCurrentSeasonStart(); + const tokenPromiseReturn = await Promise.allSettled( + tokens.map( + async ({ + safeAddress, + lastBlockHeight, + guildId, + address, + chainId, + multiplier, + }) => { + const balancesReturned = await setBalances({ + safeAddress, + lastBlockHeight, + tokenAddress: address, + chainId, + }); + const { + guild: [{ guild_players: players }], + } = await client.GetGuildMembers({ id: guildId }); + const playerReturned = await Promise.all( + players.map( + async ({ + Player: { ethereumAddress: ethAddress, id: playerId }, + }) => { + const total = await client.GetTotalForPlayer({ + tokenAddress: address, + playerAddress: ethAddress, + }); + const balance = + total.balance_aggregate.aggregate?.sum?.amount ?? 
0; - const seasonalTotal = await client.GetTotalForPlayer({ - tokenAddress: address, - playerAddress: player.Player.ethereumAddress, - executedAfter: getCurrentSeasonStart(), - }); - const seasonalBalance = - seasonalTotal.balance_aggregate.aggregate?.sum?.amount; + const seasonalTotal = await client.GetTotalForPlayer({ + tokenAddress: address, + playerAddress: ethAddress, + executedAfter: seasonStart, + }); + const seasonalSum = + seasonalTotal.balance_aggregate.aggregate?.sum?.amount ?? 0; + const seasonalBalance = seasonalSum * multiplier; - const { - xp: [{ initial } = { initial: 0 }], - } = await client.GetInitialXP({ playerId: player.Player.id }); + const { + xp: [{ initial } = { initial: 0 }], + } = await client.GetInitialXP({ playerId }); + const calculated = balance * multiplier + initial; - await client.UpsertXP({ - balance: (balance ?? 0) + initial, - playerId: player.Player.id, - tokenAddress: address, - seasonalBalance, - }); - }), - ); - }, - ), - ); - const ranks = await client.GetPlayersByTotalXP(); + await client.UpsertXP({ + balance: calculated, + playerId, + tokenAddress: address, + seasonalBalance, + }); - Promise.allSettled( - ranks.xp.map(async ({ playerId, seasonalBalance, balance }, index) => { - const rank = computeRank(index); - await client.UpdateProfileXP({ - playerId, - seasonXP: seasonalBalance, - totalXP: balance, - rank, - }); - }), - ); - res.json('Complete! XP saved'); + return { + address: ethAddress, + xp: { + initial, + accumulated: balance, + calculated, + seasonal: seasonalBalance, + }, + }; + }, + ), + ); + return { + ...balancesReturned, + multiplier, + count: players.length, + players: playerReturned, + }; + }, + ), + ); + const tokenReturns = tokenPromiseReturn.map((t) => + t.status === 'fulfilled' ? 
t.value : { status: 'failed' }, + ); + + const { xp } = await client.GetPlayersByTotalXP(); + Promise.allSettled( + xp.map(async ({ playerId, seasonalBalance, balance }, index) => { + const rank = computeRank(index); + await client.UpdateProfileXP({ + playerId, + seasonXP: seasonalBalance, + totalXP: balance, + rank, + }); + }), + ); + res.json({ + success: true, + message: `Successfully synced ${xp.length} users.`, + seasonStart, + tokenReturns, + }); + } catch (err) { + res.status(500).json({ success: false, message: (err as Error).message }); + } }; diff --git a/packages/backend/src/handlers/graphql/mutations/token.ts b/packages/backend/src/handlers/graphql/mutations/token.ts index 44cc7ba0da..b53f53d8a4 100644 --- a/packages/backend/src/handlers/graphql/mutations/token.ts +++ b/packages/backend/src/handlers/graphql/mutations/token.ts @@ -16,16 +16,18 @@ export const TokenMutations = /* GraphQL */ ` id } } - mutation UpdateLastOffset($tokenAddress: String!, $offset: Int!) { + + mutation UpdateLastBlockHeight($tokenAddress: String!, $height: Int!) { update_token( where: { address: { _eq: $tokenAddress } } - _set: { lastOffset: $offset } + _set: { lastBlockHeight: $height } ) { returning { - lastOffset + lastBlockHeight } } } + mutation UpsertXP( $balance: float8! $playerId: uuid! diff --git a/packages/backend/src/handlers/graphql/queries/token.ts b/packages/backend/src/handlers/graphql/queries/token.ts index 246ad66c5a..9691ca89f6 100644 --- a/packages/backend/src/handlers/graphql/queries/token.ts +++ b/packages/backend/src/handlers/graphql/queries/token.ts @@ -5,15 +5,18 @@ export const TokenQueries = /* GraphQL */ ` chainId } } + query GetTokens { token { address chainId safeAddress - lastOffset + lastBlockHeight guildId + multiplier } } + query GetTotalForPlayer( $playerAddress: String! $tokenAddress: String! @@ -33,6 +36,7 @@ export const TokenQueries = /* GraphQL */ ` } } } + query GetInitialXP($playerId: uuid!) 
{ xp(where: { playerId: { _eq: $playerId } }) { initial diff --git a/packages/backend/src/handlers/triggers/types.ts b/packages/backend/src/handlers/triggers/types.ts index 030be78e9b..9587e3f194 100644 --- a/packages/backend/src/handlers/triggers/types.ts +++ b/packages/backend/src/handlers/triggers/types.ts @@ -30,10 +30,7 @@ export interface TriggerPayload { // so that we have compile-time checks if and when these columns / types change. export type GuildRow = Omit< Guild, - | 'joinButtonUrl' - | 'websiteUrl' - | 'discordId' - | 'membershipThroughDiscord' + 'joinButtonUrl' | 'websiteUrl' | 'discordId' | 'membershipThroughDiscord' > & { join_button_url: string; website_url: string; diff --git a/packages/design-system/src/MetaTile.tsx b/packages/design-system/src/MetaTile.tsx index 9d0580d901..e098b46bfd 100644 --- a/packages/design-system/src/MetaTile.tsx +++ b/packages/design-system/src/MetaTile.tsx @@ -1,4 +1,12 @@ -import { Flex, FlexProps, Image, StackProps, VStack } from '@chakra-ui/react'; +import { + Box, + Flex, + FlexProps, + Image, + StackProps, + useBreakpointValue, + VStack, +} from '@chakra-ui/react'; import { Maybe } from '@metafam/utils'; import React, { PropsWithChildren, useEffect, useRef } from 'react'; import VanillaTilt from 'vanilla-tilt'; @@ -114,6 +122,8 @@ export const MetaTilePlaybook = React.forwardRef< data-tilt-easing="cubic-bezier(.03,.98,.52,.99)" h="full" w="full" + minH="480px" + mr={0} borderRightRadius={25} ref={(elem) => { tilt.current = elem; @@ -127,18 +137,19 @@ export const MetaTilePlaybook = React.forwardRef< > ( + ( + { noTilt = false, maxTilt = 6, children, image, index, length, ...props }, + fwdRef, + ) => { + const cardMaxWidth = + useBreakpointValue({ + base: 36, + md: 48, + xl: '15rem', + '2xl': '20rem', + }) || '20rem'; + const cardMinHeight = + useBreakpointValue({ + base: '13.5rem', + md: '18rem', + xl: '22.5rem', + '2xl': '30rem', + }) || '30rem'; + + const tilt = useRef>(null); + useEffect(() => { + if (!noTilt 
&& tilt.current) { + VanillaTilt.init(tilt.current); + } + }, [noTilt]); + + return ( + { + tilt.current = elem; + if (typeof fwdRef === 'function') { + fwdRef(elem); + } else if (fwdRef) { + // eslint-disable-next-line no-param-reassign + fwdRef.current = elem; + } + }} + > + + + {children} + + + + + ); + }, +); + +type MetaTilePathCosmeticOptions = 'edges' | 'overlay'; +interface MetaTilePathCosmeticsProps { + type: MetaTilePathCosmeticOptions; + width?: string | number; +} + +/** + * `MetaTilePathCosmetics` - The cosmetic elements of the MetaTilePathPlaybook component when used in paths & playbooks + * @param type 'edges | overlay' - The type of cosmetic to render + * @param width string | number - Sets the width of the cosmetic overlay + * @returns + */ +export const MetaTilePathCosmetics: React.FC = ({ + type, + width, +}) => { + if (type === 'edges') { + return ( +