Skip to content

Commit

Permalink
chore: minor cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
zepatrik committed Dec 2, 2024
1 parent facb8a5 commit 13e2faf
Show file tree
Hide file tree
Showing 3 changed files with 14 additions and 11 deletions.
14 changes: 9 additions & 5 deletions internal/e2e/transaction_cases_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,8 @@
package e2e

import (
"github.com/stretchr/testify/require"
"strconv"
"testing"

"github.com/ory/x/pointerx"
Expand Down Expand Up @@ -66,20 +68,20 @@ func runTransactionCases(c transactClient, m *namespaceTestManager) func(*testin
m.add(t, ns...)

var tuples []*ketoapi.RelationTuple
for range 12001 {
for i := range 12001 {
tuples = append(tuples, &ketoapi.RelationTuple{
Namespace: ns[0].Name,
Object: "o",
Object: "o" + strconv.Itoa(i),
Relation: "rel",
SubjectSet: &ketoapi.SubjectSet{
Namespace: ns[1].Name,
Object: "o",
Object: "o" + strconv.Itoa(i),
Relation: "rel",
},
},
&ketoapi.RelationTuple{
Namespace: ns[0].Name,
Object: "o",
Object: "o" + strconv.Itoa(i),
Relation: "rel",
SubjectID: pointerx.Ptr("sid"),
},
Expand All @@ -91,8 +93,10 @@ func runTransactionCases(c transactClient, m *namespaceTestManager) func(*testin
resp := c.queryTuple(t, &ketoapi.RelationQuery{
Namespace: &ns[0].Name,
})
// Use this instead, but currently unbearably huge output
// assert.ElementsMatch(t, tuples, resp.RelationTuples)
for i := range tuples {
assert.Contains(t, resp.RelationTuples, tuples[i])
require.Contains(t, resp.RelationTuples, tuples[i])
}

c.transactTuples(t, nil, tuples)
Expand Down
5 changes: 2 additions & 3 deletions internal/persistence/sql/query_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -111,19 +111,18 @@ func TestBuildInsert(t *testing.T) {
// TestBuildInsertUUIDs verifies that buildInsertUUIDs renders the correct
// dialect-specific "insert, ignore duplicates" statement for the
// keto_uuid_mappings table, and flattens the mappings into the matching
// (id, string_representation) argument list.
//
// NOTE: the diff extraction had interleaved the pre-change lines (a `nid`
// variable and three-argument buildInsertUUIDs calls) with the post-change
// ones, redeclaring q/args; this is the reconstructed post-commit version,
// matching the two-argument buildInsertUUIDs(values, dialect) signature.
func TestBuildInsertUUIDs(t *testing.T) {
	t.Parallel()

	foo, bar, baz := uuidx.NewV4(), uuidx.NewV4(), uuidx.NewV4()
	uuids := []UUIDMapping{
		{foo, "foo"},
		{bar, "bar"},
		{baz, "baz"},
	}

	// MySQL has no ON CONFLICT clause; duplicates are skipped via INSERT IGNORE.
	q, args := buildInsertUUIDs(uuids, "mysql")
	assert.Equal(t, "INSERT IGNORE INTO keto_uuid_mappings (id, string_representation) VALUES (?,?),(?,?),(?,?)", q)
	assert.Equal(t, []any{foo, "foo", bar, "bar", baz, "baz"}, args)

	// Every other dialect is assumed to support ON CONFLICT ... DO NOTHING.
	q, args = buildInsertUUIDs(uuids, "anything else")
	assert.Equal(t, "INSERT INTO keto_uuid_mappings (id, string_representation) VALUES (?,?),(?,?),(?,?) ON CONFLICT (id) DO NOTHING", q)
	assert.Equal(t, []any{foo, "foo", bar, "bar", baz, "baz"}, args)
}
6 changes: 3 additions & 3 deletions internal/persistence/sql/uuid_mapping.go
Original file line number Diff line number Diff line change
Expand Up @@ -104,7 +104,7 @@ func (p *Persister) MapStringsToUUIDs(ctx context.Context, values ...string) (uu
p.d.Logger().WithField("values", values).WithField("UUIDs", uuids).Trace("adding UUID mappings")

mappings := make([]UUIDMapping, len(values))
for i := range len(values) {
for i := range values {
mappings[i] = UUIDMapping{
ID: uuids[i],
StringRepresentation: values[i],
Expand All @@ -113,7 +113,7 @@ func (p *Persister) MapStringsToUUIDs(ctx context.Context, values ...string) (uu

err = p.Transaction(ctx, func(ctx context.Context) error {
for chunk := range slices.Chunk(mappings, chunkSizeInsertUUIDMappings) {
query, args := buildInsertUUIDs(p.NetworkID(ctx), chunk, p.conn.Dialect.Name())
query, args := buildInsertUUIDs(chunk, p.conn.Dialect.Name())
if err := p.Connection(ctx).RawQuery(query, args...).Exec(); err != nil {
return sqlcon.HandleError(err)
}
Expand Down Expand Up @@ -143,7 +143,7 @@ func (p *Persister) MapUUIDsToStrings(ctx context.Context, u ...uuid.UUID) (_ []
return p.batchFromUUIDs(ctx, u)
}

func buildInsertUUIDs(nid uuid.UUID, values []UUIDMapping, dialect string) (query string, args []any) {
func buildInsertUUIDs(values []UUIDMapping, dialect string) (query string, args []any) {
if len(values) == 0 {
return "", nil
}
Expand Down

0 comments on commit 13e2faf

Please sign in to comment.