Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: allow variable object sizes #18

Open
wants to merge 1 commit into
base: main
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 16 additions & 18 deletions main.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@
"io"
"log"
"net/http"

// Register the pprof endpoints under the web server root at /debug/pprof
_ "net/http/pprof"
"os"
Expand All @@ -31,36 +32,32 @@
)

var (
eG errgroup.Group

grpcConnPoolSize = 1
maxConnsPerHost = 100
maxIdleConnsPerHost = 100
MiB = 1024 * 1024

Check failure on line 40 in main.go

View workflow job for this annotation

GitHub Actions / audit

exported var MiB should have comment or be unexported
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Also, can we fix this audit error?

maxRetryDuration = 30 * time.Second
retryMultiplier = 2.0

// MB means 1024 KiB (1024 * 1024 bytes).
MB = 1024 * 1024
Copy link
Owner

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Was this unused?


numOfWorker = flag.Int("worker", 48, "Number of concurrent worker to read")
objectNamePrefix = "" // set to "princer_{size}_files/file_" after flags are parsed
objectNameSuffix = ""
tracerName = "princer-storage-benchmark"

// Flags.
numOfWorker = flag.Int("worker", 48, "Number of concurrent worker to read")
numOfReadCallPerWorker = flag.Int("read-call-per-worker", 1000000, "Number of read call per worker")

maxRetryDuration = 30 * time.Second

retryMultiplier = 2.0

bucketName = flag.String("bucket", "princer-working-dirs", "GCS bucket name.")

// ProjectName denotes gcp project name.
bucketName = flag.String("bucket", "princer-working-dirs", "GCS bucket name.")
ProjectName = flag.String("project", "gcs-fuse-test", "GCP project name.")

clientProtocol = flag.String("client-protocol", "http", "Network protocol.")
objectNamePrefix = "princer_100M_files/file_"
objectNameSuffix = ""
clientProtocol = flag.String("client-protocol", "http", "Network protocol.")
objectSize = flag.String("size", "100M", "Object size portion of prefix")

tracerName = "princer-storage-benchmark"
enableTracing = flag.Bool("enable-tracing", false, "Enable tracing with Cloud Trace export")
enablePprof = flag.Bool("enable-pprof", false, "Enable pprof server")
traceSampleRate = flag.Float64("trace-sample-rate", 1.0, "Sampling rate for Cloud Trace")

eG errgroup.Group
)

// CreateHTTPClient create http storage client.
Expand Down Expand Up @@ -124,7 +121,6 @@

// ReadObject creates reader object corresponding to workerID with the help of bucketHandle.
func ReadObject(ctx context.Context, workerID int, bucketHandle *storage.BucketHandle) (err error) {

objectName := objectNamePrefix + strconv.Itoa(workerID) + objectNameSuffix

for i := 0; i < *numOfReadCallPerWorker; i++ {
Expand Down Expand Up @@ -163,6 +159,8 @@
flag.Parse()
ctx := context.Background()

objectNamePrefix = fmt.Sprintf("princer_%s_files/file_", *objectSize)

if *enableTracing {
cleanup := enableTraceExport(ctx, *traceSampleRate)
defer cleanup()
Expand Down
Loading