diff --git a/go.mod b/go.mod
index 3ee28df..6abb2ea 100644
--- a/go.mod
+++ b/go.mod
@@ -5,19 +5,23 @@ go 1.23
 require (
 	github.com/ClickHouse/clickhouse-go/v2 v2.30.0
 	github.com/DIMO-Network/clickhouse-infra v0.0.3
+	github.com/DIMO-Network/model-garage v0.3.3
 	github.com/DIMO-Network/nameindexer v0.0.8
 	github.com/DIMO-Network/shared v0.11.1
 	github.com/aws/aws-sdk-go-v2 v1.32.2
 	github.com/aws/aws-sdk-go-v2/credentials v1.17.27
 	github.com/aws/aws-sdk-go-v2/service/s3 v1.65.3
+	github.com/ethereum/go-ethereum v1.14.11
 	github.com/gofiber/contrib/jwt v1.0.10
 	github.com/gofiber/fiber/v2 v2.52.5
 	github.com/gofiber/swagger v1.1.0
+	github.com/golang-jwt/jwt/v5 v5.2.1
 	github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
 	github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0
 	github.com/prometheus/client_golang v1.19.1
 	github.com/rs/zerolog v1.33.0
+	github.com/stretchr/testify v1.9.0
 	github.com/swaggo/swag v1.16.4
 	golang.org/x/sync v0.8.0
 	google.golang.org/grpc v1.65.0
@@ -26,7 +30,6 @@ require (
 
 require (
 	github.com/ClickHouse/ch-go v0.61.5 // indirect
-	github.com/DIMO-Network/model-garage v0.3.3 // indirect
 	github.com/KyleBanks/depth v1.2.1 // indirect
 	github.com/MicahParks/keyfunc/v2 v2.1.0 // indirect
 	github.com/PuerkitoBio/purell v1.1.1 // indirect
@@ -45,8 +48,8 @@ require (
 	github.com/aws/smithy-go v1.22.0 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
-	github.com/ethereum/go-ethereum v1.14.11 // indirect
 	github.com/friendsofgo/errors v0.9.2 // indirect
 	github.com/go-faster/city v1.0.1 // indirect
 	github.com/go-faster/errors v0.7.1 // indirect
@@ -54,7 +57,6 @@ require (
 	github.com/go-openapi/jsonreference v0.19.6 // indirect
 	github.com/go-openapi/spec v0.20.4 // indirect
 	github.com/go-openapi/swag v0.19.15 // indirect
-	github.com/golang-jwt/jwt/v5 v5.2.1 // indirect
 	github.com/google/uuid v1.6.0 // indirect
 	github.com/holiman/uint256 v1.3.1 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
@@ -67,6 +69,7 @@ require (
 	github.com/paulmach/orb v0.11.1 // indirect
 	github.com/pierrec/lz4/v4 v4.1.21 // indirect
 	github.com/pkg/errors v0.9.1 // indirect
+	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/prometheus/client_model v0.6.1 // indirect
 	github.com/prometheus/common v0.55.0 // indirect
 	github.com/prometheus/procfs v0.15.1 // indirect
diff --git a/internal/app/app.go b/internal/app/app.go
index a901983..a8da869 100644
--- a/internal/app/app.go
+++ b/internal/app/app.go
@@ -4,6 +4,7 @@ import (
 	"errors"
 	"fmt"
 	"net/http"
+	"strconv"
 	"strings"
 
 	"github.com/DIMO-Network/fetch-api/internal/config"
@@ -29,11 +30,16 @@ import (
 	"google.golang.org/grpc"
 )
 
+// CreateWebServer creates a new web server with the given logger and settings.
 func CreateWebServer(logger *zerolog.Logger, settings *config.Settings) (*fiber.App, error) {
 	if !common.IsHexAddress(settings.VehicleNFTAddress) {
 		return nil, errors.New("invalid vehicle NFT address")
 	}
 	vehicleNFTAddress := common.HexToAddress(settings.VehicleNFTAddress)
+	chainId, err := strconv.ParseUint(settings.ChainID, 10, 64)
+	if err != nil {
+		return nil, fmt.Errorf("failed to parse chain ID: %w", err)
+	}
 
 	app := fiber.New(fiber.Config{
 		ErrorHandler: func(c *fiber.Ctx, err error) error {
@@ -75,17 +81,18 @@ func CreateWebServer(logger *zerolog.Logger, settings *config.Settings) (*fiber.
 	}
 	s3Client := s3ClientFromSettings(settings)
-
-	h := httphandler.NewHandler(chConn, s3Client, settings.CloudEventBucket, settings.CloudEventBucket)
+	vehHandler := httphandler.NewHandler(logger, chConn, s3Client,
+		settings.CloudEventBucket, settings.EphemeralBucket, vehicleNFTAddress, chainId)
 
 	// File endpoints
-	vehicleGroup.Post("/latest-filename/:tokenId", vehiclePriv, jwtAuth, h.GetLatestFileName)
-	vehicleGroup.Post("/filenames/:tokenId", vehiclePriv, jwtAuth, h.GetFileNames)
-	vehicleGroup.Post("/files/:tokenId", jwtAuth, vehiclePriv, h.GetFiles)
-	vehicleGroup.Post("/latest-file/:tokenId", jwtAuth, vehiclePriv, h.GetLatestFile)
+	vehicleGroup.Post("/latest-filename/:tokenId", vehiclePriv, jwtAuth, vehHandler.GetLatestFileName)
+	vehicleGroup.Post("/filenames/:tokenId", vehiclePriv, jwtAuth, vehHandler.GetFileNames)
+	vehicleGroup.Post("/files/:tokenId", jwtAuth, vehiclePriv, vehHandler.GetFiles)
+	vehicleGroup.Post("/latest-file/:tokenId", jwtAuth, vehiclePriv, vehHandler.GetLatestFile)
 
 	return app, nil
 }
 
+// CreateGRPCServer creates a new gRPC server with the given logger and settings.
 func CreateGRPCServer(logger *zerolog.Logger, settings *config.Settings) (*grpc.Server, error) {
 	chConn, err := chClientFromSettings(settings)
 	if err != nil {
diff --git a/internal/config/settings.go b/internal/config/settings.go
index 056390a..2c29c23 100644
--- a/internal/config/settings.go
+++ b/internal/config/settings.go
@@ -11,6 +11,7 @@ type Settings struct {
 	TokenExchangeJWTKeySetURL string `yaml:"TOKEN_EXCHANGE_JWK_KEY_SET_URL"`
 	TokenExchangeIssuer       string `yaml:"TOKEN_EXCHANGE_ISSUER_URL"`
 	VehicleNFTAddress         string `yaml:"VEHICLE_NFT_ADDRESS"`
+	ChainID                   string `yaml:"CHAIN_ID"`
 	CloudEventBucket          string `yaml:"CLOUDEVENT_BUCKET"`
 	EphemeralBucket           string `yaml:"EPHEMERAL_BUCKET"`
 	S3AWSRegion               string `yaml:"S3_AWS_REGION"`
diff --git a/internal/fetch/httphandler/httphandler.go b/internal/fetch/httphandler/httphandler.go
index 18ea0ae..debe718 100644
--- a/internal/fetch/httphandler/httphandler.go
+++ b/internal/fetch/httphandler/httphandler.go
@@ -1,28 +1,77 @@
+// Package httphandler provides the HTTP handler for the fetch service.
 package httphandler
 
 import (
+	"context"
+	"errors"
 	"fmt"
+	"strconv"
+	"time"
 
 	"github.com/ClickHouse/clickhouse-go/v2"
+	"github.com/DIMO-Network/model-garage/pkg/cloudevent"
+	"github.com/DIMO-Network/nameindexer"
 	"github.com/DIMO-Network/nameindexer/pkg/clickhouse/indexrepo"
 	"github.com/aws/aws-sdk-go-v2/service/s3"
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/gofiber/fiber/v2"
+	"github.com/rs/zerolog"
 )
 
-// Handler contains the dependencies for the HTTP handlers.
+var (
+	errInternal = errors.New("internal error")
+	errTimeout  = errors.New("request exceeded or is estimated to exceed the maximum execution time")
+)
+
+// Handler is the HTTP handler for the fetch service.
 type Handler struct {
 	indexService     *indexrepo.Service
 	cloudEventBucket string
 	ephemeralBucket  string
+	vehicleAddr      common.Address
+	chainID          uint64
+	logger           *zerolog.Logger
 }
 
 // NewHandler creates a new Handler instance.
-func NewHandler(chConn clickhouse.Conn, s3Client *s3.Client, cloudEventBucket, ephemeralBucket string) *Handler {
+func NewHandler(logger *zerolog.Logger, chConn clickhouse.Conn, s3Client *s3.Client,
+	cloudEventBucket, ephemeralBucket string,
+	vehicleAddr common.Address, chainID uint64,
+) *Handler {
 	indexService := indexrepo.New(chConn, s3Client)
 	return &Handler{
 		indexService:     indexService,
 		cloudEventBucket: cloudEventBucket,
 		ephemeralBucket:  ephemeralBucket,
+		vehicleAddr:      vehicleAddr,
+		chainID:          chainID,
+		logger:           logger,
+	}
+}
+
+type searchParams struct {
+	Type     *string   `query:"type"`
+	Source   *string   `query:"source"`
+	Producer *string   `query:"producer"`
+	Before   time.Time `query:"before"`
+	After    time.Time `query:"after"`
+	Limit    int       `query:"limit"`
+}
+
+func (s *searchParams) toSearchOptions(subject cloudevent.NFTDID) indexrepo.SearchOptions {
+	var primaryFiller *string
+	if s.Type != nil {
+		filler := nameindexer.CloudTypeToFiller(*s.Type)
+		primaryFiller = &filler
+	}
+	encodedSubject := nameindexer.EncodeNFTDID(subject)
+	return indexrepo.SearchOptions{
+		Subject:       &encodedSubject,
+		PrimaryFiller: primaryFiller,
+		Source:        s.Source,
+		Producer:      s.Producer,
+		Before:        s.Before,
+		After:         s.After,
 	}
 }
@@ -32,59 +81,68 @@ func NewHandler(chConn clickhouse.Conn, s3Client *s3.Client, cloudEventBucket, e
 // @Tags files
 // @Accept json
 // @Produce json
-// @Param request body indexrepo.SearchOptions true "Search criteria for finding the latest file"
+// @Param params query SearchParams false "Search parameters"
 // @Success 200 {object} map[string]string "Returns the latest filename"
-// @Failure 400 {object} map[string]string "Invalid request body"
+// @Failure 400 {object} map[string]string "Invalid request"
 // @Failure 500 {object} map[string]string "Server error"
-// @Router /latest-filename [post]
-func (h *Handler) GetLatestFileName(c *fiber.Ctx) error {
-	var req indexrepo.SearchOptions
-	if err := c.BodyParser(&req); err != nil {
-		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
-			"error": fmt.Sprintf("failed to parse request body: %v", err),
-		})
-	}
-	filename, err := h.indexService.GetLatestFileName(c.Context(), req)
+// @Router /v1/vehicle/{tokenId}/latest-filename [get]
+func (h *Handler) GetLatestFileName(fCtx *fiber.Ctx) error {
+	tokenID := fCtx.Params("tokenId")
+	uTokenID, err := strconv.ParseUint(tokenID, 0, 32)
+	if err != nil {
+		return fiber.NewError(fiber.StatusBadRequest, fmt.Sprintf("failed to parse token ID: %v", err))
+	}
+
+	var params searchParams
+	err = fCtx.QueryParser(&params)
 	if err != nil {
-		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
-			"error": fmt.Sprintf("failed to get latest file name: %v", err),
-		})
+		return fiber.NewError(fiber.StatusBadRequest, fmt.Sprintf("failed to parse request query: %v", err))
 	}
-	return c.JSON(fiber.Map{
+
+	opts := params.toSearchOptions(cloudevent.NFTDID{ChainID: h.chainID, ContractAddress: h.vehicleAddr, TokenID: uint32(uTokenID)})
+
+	filename, err := h.indexService.GetLatestFileName(fCtx.Context(), opts)
+	if err != nil {
+		return handleDBError(err, h.logger)
+	}
+
+	return fCtx.JSON(fiber.Map{
 		"filename": filename,
 	})
 }
 
-type SearchOptionsWithLimit struct {
-	indexrepo.SearchOptions
-	Limit int `json:"limit" example:"10"`
-}
-
 // GetFileNames handles requests for multiple filenames
 // @Summary Get multiple filenames based on search criteria
 // @Description Retrieves a list of filenames that match the provided search options
 // @Tags files
 // @Accept json
 // @Produce json
-// @Param request body SearchOptionsWithLimit true "Search criteria and limit for finding files"
+// @Param params query SearchParams false "Search parameters"
 // @Success 200 {object} map[string][]string "Returns list of filenames"
-// @Failure 400 {object} map[string]string "Invalid request body"
+// @Failure 400 {object} map[string]string "Invalid request"
 // @Failure 500 {object} map[string]string "Server error"
-// @Router /filenames [post]
-func (h *Handler) GetFileNames(c *fiber.Ctx) error {
-	var req SearchOptionsWithLimit
-	if err := c.BodyParser(&req); err != nil {
-		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
-			"error": fmt.Sprintf("failed to parse request body: %v", err),
-		})
-	}
-	filenames, err := h.indexService.GetFileNames(c.Context(), req.Limit, req.SearchOptions)
+// @Router /v1/vehicle/{tokenId}/filenames [get]
+func (h *Handler) GetFileNames(fCtx *fiber.Ctx) error {
+	tokenID := fCtx.Params("tokenId")
+	uTokenID, err := strconv.ParseUint(tokenID, 0, 32)
 	if err != nil {
-		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
-			"error": fmt.Sprintf("failed to get file names: %v", err),
-		})
+		return fiber.NewError(fiber.StatusBadRequest, fmt.Sprintf("failed to parse token ID: %v", err))
 	}
-	return c.JSON(fiber.Map{
+
+	var params searchParams
+	err = fCtx.QueryParser(&params)
+	if err != nil {
+		return fiber.NewError(fiber.StatusBadRequest, fmt.Sprintf("failed to parse request query: %v", err))
+	}
+
+	opts := params.toSearchOptions(cloudevent.NFTDID{ChainID: h.chainID, ContractAddress: h.vehicleAddr, TokenID: uint32(uTokenID)})
+
+	filenames, err := h.indexService.GetFileNames(fCtx.Context(), params.Limit, opts)
+	if err != nil {
+		return handleDBError(err, h.logger)
+	}
+
+	return fCtx.JSON(fiber.Map{
 		"filenames": filenames,
 	})
 }
@@ -95,25 +153,32 @@ func (h *Handler) GetFileNames(c *fiber.Ctx) error {
 // @Tags files
 // @Accept json
 // @Produce json
-// @Param request body SearchOptionsWithLimit true "Search criteria and limit for finding files"
+// @Param params query SearchParams false "Search parameters"
 // @Success 200 {object} map[string][]byte "Returns file data"
-// @Failure 400 {object} map[string]string "Invalid request body"
+// @Failure 400 {object} map[string]string "Invalid request"
 // @Failure 500 {object} map[string]string "Server error"
-// @Router /files [post]
-func (h *Handler) GetFiles(c *fiber.Ctx) error {
-	var req SearchOptionsWithLimit
-	if err := c.BodyParser(&req); err != nil {
-		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
-			"error": fmt.Sprintf("failed to parse request body: %v", err),
-		})
-	}
-	data, err := h.indexService.GetData(c.Context(), h.cloudEventBucket, req.Limit, req.SearchOptions)
+// @Router /v1/vehicle/{tokenId}/files [get]
+func (h *Handler) GetFiles(fCtx *fiber.Ctx) error {
+	tokenID := fCtx.Params("tokenId")
+	uTokenID, err := strconv.ParseUint(tokenID, 0, 32)
+	if err != nil {
+		return fiber.NewError(fiber.StatusBadRequest, fmt.Sprintf("failed to parse token ID: %v", err))
+	}
+
+	var params searchParams
+	err = fCtx.QueryParser(&params)
 	if err != nil {
-		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
-			"error": fmt.Sprintf("failed to get files: %v", err),
-		})
+		return fiber.NewError(fiber.StatusBadRequest, fmt.Sprintf("failed to parse request query: %v", err))
 	}
-	return c.JSON(fiber.Map{
+
+	opts := params.toSearchOptions(cloudevent.NFTDID{ChainID: h.chainID, ContractAddress: h.vehicleAddr, TokenID: uint32(uTokenID)})
+
+	data, err := h.indexService.GetData(fCtx.Context(), h.cloudEventBucket, params.Limit, opts)
+	if err != nil {
+		return handleDBError(err, h.logger)
+	}
+
+	return fCtx.JSON(fiber.Map{
 		"data": data,
 	})
 }
@@ -124,25 +189,42 @@ func (h *Handler) GetFiles(c *fiber.Ctx) error {
 // @Tags files
 // @Accept json
 // @Produce json
-// @Param request body indexrepo.SearchOptions true "Search criteria for finding the latest file"
+// @Param params query SearchParams false "Search parameters"
 // @Success 200 {object} map[string][]byte "Returns latest file data"
-// @Failure 400 {object} map[string]string "Invalid request body"
+// @Failure 400 {object} map[string]string "Invalid request"
 // @Failure 500 {object} map[string]string "Server error"
-// @Router /latest-file [post]
-func (h *Handler) GetLatestFile(c *fiber.Ctx) error {
-	var req indexrepo.SearchOptions
-	if err := c.BodyParser(&req); err != nil {
-		return c.Status(fiber.StatusBadRequest).JSON(fiber.Map{
-			"error": fmt.Sprintf("failed to parse request body: %v", err),
-		})
-	}
-	data, err := h.indexService.GetLatestData(c.Context(), h.cloudEventBucket, req)
+// @Router /v1/vehicle/{tokenId}/latest-file [get]
+func (h *Handler) GetLatestFile(fCtx *fiber.Ctx) error {
+	tokenID := fCtx.Params("tokenId")
+	uTokenID, err := strconv.ParseUint(tokenID, 0, 32)
 	if err != nil {
-		return c.Status(fiber.StatusInternalServerError).JSON(fiber.Map{
-			"error": fmt.Sprintf("failed to get latest file: %v", err),
-		})
+		return fiber.NewError(fiber.StatusBadRequest, fmt.Sprintf("failed to parse token ID: %v", err))
 	}
-	return c.JSON(fiber.Map{
+
+	var params searchParams
+	err = fCtx.QueryParser(&params)
+	if err != nil {
+		return fiber.NewError(fiber.StatusBadRequest, fmt.Sprintf("failed to parse request query: %v", err))
+	}
+
+	opts := params.toSearchOptions(cloudevent.NFTDID{ChainID: h.chainID, ContractAddress: h.vehicleAddr, TokenID: uint32(uTokenID)})
+
+	data, err := h.indexService.GetLatestData(fCtx.Context(), h.cloudEventBucket, opts)
+	if err != nil {
+		return handleDBError(err, h.logger)
+	}
+
+	return fCtx.JSON(fiber.Map{
 		"data": data,
 	})
 }
+
+// handleDBError logs the error and returns a generic error message.
+func handleDBError(err error, log *zerolog.Logger) error {
+	if errors.Is(err, context.DeadlineExceeded) {
+		log.Error().Err(err).Msg("failed to query db")
+		return errTimeout
+	}
+	log.Error().Err(err).Msg("failed to query db")
+	return errInternal
+}
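
Reviewer note: a minimal client-side sketch of how the reworked vehicle file endpoints are called after this change. The search criteria move out of the JSON request body and into the `:tokenId` path parameter plus the query string (bound to the `searchParams` struct via `QueryParser`), while the routes themselves are still registered with `POST` in `app.go`. The host, bearer token, and filter values below are placeholders and not part of this PR.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Placeholder host; substitute the real fetch-api deployment.
	base := "https://fetch-api.example.com"
	tokenID := 1234 // vehicle NFT token ID; becomes the :tokenId path parameter

	// Search criteria now travel in the query string and are parsed into the
	// unexported searchParams struct on the server.
	q := url.Values{}
	q.Set("source", "0x0000000000000000000000000000000000000000") // placeholder source filter
	q.Set("limit", "10")

	reqURL := fmt.Sprintf("%s/v1/vehicle/%d/filenames?%s", base, tokenID, q.Encode())

	// The vehicle routes are still registered with POST in app.go, so send POST
	// with an empty body even though the criteria live in the URL.
	req, err := http.NewRequest(http.MethodPost, reqURL, nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer <privilege-token>") // token-exchange JWT; placeholder

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body)) // expected shape: {"filenames":["..."]}
}
```

Note that with this change database failures are no longer echoed back to the caller: handleDBError logs the underlying error and the client only sees the generic internal or timeout message.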