diff --git a/search/api.py b/search/api.py
index c2050350..9d75f02d 100644
--- a/search/api.py
+++ b/search/api.py
@@ -292,8 +292,9 @@ def refresh(request: Notes, _: Settings = Depends(get_settings)):
     # in the index metadata
     ids_to_fetch = df.note_path.apply(urllib.parse.quote).tolist()
     # split in chunks of n because fetch has a limit of size
-    n = 500
+    n = 200
    ids_to_fetch = [ids_to_fetch[i : i + n] for i in range(0, len(ids_to_fetch), n)]
+    logger.info(f"Fetching {len(ids_to_fetch)} chunks of {n} ids")
     with ThreadPool(len(ids_to_fetch)) as pool:
         existing_documents = pool.map(
             lambda n: index.fetch(ids=n, namespace=request.namespace), ids_to_fetch
diff --git a/service.prod.yaml b/service.prod.yaml
index 51438f48..2dc023b0 100644
--- a/service.prod.yaml
+++ b/service.prod.yaml
@@ -23,10 +23,10 @@ spec:
           successThreshold: 1
           failureThreshold: 60
-        image: gcr.io/obsidian-ai/obsidian-search:0.3.1
+        image: gcr.io/obsidian-ai/obsidian-search:0.3.2
         env:
         - name: SENTRY_RELEASE
-          value: "0.3.1"
+          value: "0.3.2"
         - name: ENVIRONMENT
           value: "production"
         - name: UPLOAD_BATCH_SIZE
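
For context, the chunked fan-out that the first hunk tunes looks roughly like the sketch below in isolation. This is a minimal illustration, not the repository's code: `FakeIndex` and `fetch_in_chunks` are hypothetical names, and `fetch` is assumed to behave like a vector-index fetch that only accepts a bounded batch of ids per call (which is why the ids are chunked).

```python
import logging
from multiprocessing.pool import ThreadPool

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class FakeIndex:
    """Hypothetical stand-in for the index used in search/api.py."""

    def fetch(self, ids, namespace):
        # Assumed to accept at most ~n ids per call, like the real index.
        return {"vectors": {i: {} for i in ids}}


def fetch_in_chunks(index, ids, namespace, n=200):
    # Split the flat id list into chunks of at most n ids each,
    # mirroring the list comprehension in the diff.
    chunks = [ids[i : i + n] for i in range(0, len(ids), n)]
    logger.info(f"Fetching {len(chunks)} chunks of {n} ids")
    if not chunks:
        return []  # ThreadPool(0) raises, so guard the empty case
    # One thread per chunk, as in the original; the parameter is named
    # `c` here to avoid shadowing the chunk size `n` as the diff's lambda does.
    with ThreadPool(len(chunks)) as pool:
        return pool.map(lambda c: index.fetch(ids=c, namespace=namespace), chunks)


if __name__ == "__main__":
    results = fetch_in_chunks(FakeIndex(), [str(i) for i in range(450)], "ns")
    print(len(results))  # 3 chunks: 200 + 200 + 50
```

Dropping `n` from 500 to 200 means more, smaller fetch calls; since each chunk gets its own thread, the requests still run concurrently, and the new `logger.info` line makes the chunk count visible per refresh.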