diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 284673d946690..23c962959271c 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -1,7 +1,7 @@
 blank_issues_enabled: false
 contact_links:
   - name: "📚 Read docs"
-    url: https://docs.jina.ai/
+    url: https://jina.ai/serve/
     about: Find your solution from our documentation
   - name: "😊 Join us"
     url: https://career.jina.ai
diff --git a/.github/slack-pypi.json b/.github/slack-pypi.json
index 298fa6bd99ee2..2e9d4bc8b3d29 100644
--- a/.github/slack-pypi.json
+++ b/.github/slack-pypi.json
@@ -16,7 +16,7 @@
     },
     "accessory": {
       "type": "image",
-      "image_url": "https://docs.jina.ai/_static/favicon.png",
+      "image_url": "https://jina.ai/serve/_static/favicon.png",
       "alt_text": "cute cat"
     }
   },
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d60bd57dd9202..9d5d4f413c397 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9569,7 +9569,7 @@ Jina is released on every Friday evening. The PyPi package and Docker Image will
 - [[```4273af8d```](https://github.com/jina-ai/jina/commit/4273af8d46394f476423fd53c6bc4054050fd9cf)] __-__ remove hub-builder success (*Han Xiao*)
 - [[```73457b17```](https://github.com/jina-ai/jina/commit/73457b17909b68c4415613ed8da78f2e6f9774a3)] __-__ hide my exec collide with other test (#2654) (*Joan Fontanals*)
 - [[```e01ed315```](https://github.com/jina-ai/jina/commit/e01ed3152509b47a896d05d1d6d59ae41acb0515)] __-__ latency-tracking adapt new release (#2595) (*Alan Zhisheng Niu*)
-- [[```7651bb44```](https://github.com/jina-ai/jina/commit/7651bb44e725002da65bda8a10d3b4477d692935)] __-__ replace docs2.jina.ai to docs.jina.ai (*Han Xiao*)
+- [[```7651bb44```](https://github.com/jina-ai/jina/commit/7651bb44e725002da65bda8a10d3b4477d692935)] __-__ replace docs2.jina.ai to jina.ai/serve (*Han Xiao*)
 - [[```26403122```](https://github.com/jina-ai/jina/commit/264031226563e6b84073c4b3a168fa5c1e2de1d0)] __-__ fix 404 page generation in ci (*Han Xiao*)
 
 ### 🍹 Other Improvements
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index ca5d5494cffdc..b908c0826d727 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -256,9 +256,9 @@ Bonus: **Know when to break the rules**. Documentation writing is as much art as
 
 [MyST](https://myst-parser.readthedocs.io/en/latest/) Elements Usage
 
-1. Use the `{tab}` element to show multiple ways of doing one thing. [Example](https://docs.jina.ai/concepts/flow/basics/#start-and-stop)
-2. Use the `{admonition}` boxes with care. We recommend restricting yourself to [Hint](https://docs.jina.ai/concepts/flow/basics/#create), [Caution](https://docs.jina.ai/concepts/gateway/customize-http-endpoints/#enable-graphql-endpoint) and [See Also](https://docs.jina.ai/concepts/gateway/customize-http-endpoints/#enable-graphql-endpoint).
-3. Use `{dropdown}` to hide optional content, such as long code snippets or console output. [Example](https://docs.jina.ai/concepts/client/third-party-clients/#use-curl)
+1. Use the `{tab}` element to show multiple ways of doing one thing. [Example](https://jina.ai/serve/concepts/flow/basics/#start-and-stop)
+2. Use the `{admonition}` boxes with care. We recommend restricting yourself to [Hint](https://jina.ai/serve/concepts/flow/basics/#create), [Caution](https://jina.ai/serve/concepts/gateway/customize-http-endpoints/#enable-graphql-endpoint) and [See Also](https://jina.ai/serve/concepts/gateway/customize-http-endpoints/#enable-graphql-endpoint).
+3. 
Use `{dropdown}` to hide optional content, such as long code snippets or console output. [Example](https://jina.ai/serve/concepts/client/third-party-clients/#use-curl) ### Building documentation on your local machine diff --git a/Dockerfiles/debianx.Dockerfile b/Dockerfiles/debianx.Dockerfile index 7c89aec38b51f..4c2c414e83d7c 100644 --- a/Dockerfiles/debianx.Dockerfile +++ b/Dockerfiles/debianx.Dockerfile @@ -27,7 +27,7 @@ LABEL org.opencontainers.image.vendor="Jina AI Limited" \ org.opencontainers.image.description="Build multimodal AI services via cloud native technologies" \ org.opencontainers.image.authors="hello@jina.ai" \ org.opencontainers.image.url="https://github.com/jina-ai/jina" \ - org.opencontainers.image.documentation="https://docs.jina.ai" + org.opencontainers.image.documentation="https://jina.ai/serve" # constant, wont invalidate cache ENV PIP_NO_CACHE_DIR=1 \ diff --git a/README.md b/README.md index b1047a1270a17..b742805c6a86b 100644 --- a/README.md +++ b/README.md @@ -32,7 +32,7 @@ Key advantages over FastAPI: pip install jina ``` -See guides for [Apple Silicon](https://docs.jina.ai/get-started/install/apple-silicon-m1-m2/) and [Windows](https://docs.jina.ai/get-started/install/windows/). +See guides for [Apple Silicon](https://jina.ai/serve/get-started/install/apple-silicon-m1-m2/) and [Windows](https://jina.ai/serve/get-started/install/windows/). ## Core Concepts @@ -50,28 +50,31 @@ from jina import Executor, requests from docarray import DocList, BaseDoc from transformers import pipeline + class Prompt(BaseDoc): - text: str + text: str + class Generation(BaseDoc): - prompt: str - text: str + prompt: str + text: str + class StableLM(Executor): - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.generator = pipeline( - 'text-generation', model='stabilityai/stablelm-base-alpha-3b' - ) - - @requests - def generate(self, docs: DocList[Prompt], **kwargs) -> DocList[Generation]: - generations = DocList[Generation]() - prompts = docs.text - llm_outputs = self.generator(prompts) - for prompt, output in zip(prompts, llm_outputs): - generations.append(Generation(prompt=prompt, text=output)) - return generations + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.generator = pipeline( + 'text-generation', model='stabilityai/stablelm-base-alpha-3b' + ) + + @requests + def generate(self, docs: DocList[Prompt], **kwargs) -> DocList[Generation]: + generations = DocList[Generation]() + prompts = docs.text + llm_outputs = self.generator(prompts) + for prompt, output in zip(prompts, llm_outputs): + generations.append(Generation(prompt=prompt, text=output)) + return generations ``` Deploy with Python or YAML: @@ -83,7 +86,7 @@ from executor import StableLM dep = Deployment(uses=StableLM, timeout_ready=-1, port=12345) with dep: - dep.block() + dep.block() ``` ```yaml @@ -115,14 +118,10 @@ Chain services into a Flow: ```python from jina import Flow -flow = ( - Flow(port=12345) - .add(uses=StableLM) - .add(uses=TextToImage) -) +flow = Flow(port=12345).add(uses=StableLM).add(uses=TextToImage) with flow: - flow.block() + flow.block() ``` ## Scaling and Deployment @@ -207,62 +206,66 @@ Enable token-by-token streaming for responsive LLM applications: ```python from docarray import BaseDoc + class PromptDocument(BaseDoc): - prompt: str - max_tokens: int + prompt: str + max_tokens: int + class ModelOutputDocument(BaseDoc): - token_id: int - generated_text: str + token_id: int + generated_text: str ``` 2. 
Initialize service: ```python from transformers import GPT2Tokenizer, GPT2LMHeadModel + class TokenStreamingExecutor(Executor): - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.model = GPT2LMHeadModel.from_pretrained('gpt2') + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.model = GPT2LMHeadModel.from_pretrained('gpt2') ``` 3. Implement streaming: ```python @requests(on='/stream') async def task(self, doc: PromptDocument, **kwargs) -> ModelOutputDocument: - input = tokenizer(doc.prompt, return_tensors='pt') - input_len = input['input_ids'].shape[1] - for _ in range(doc.max_tokens): - output = self.model.generate(**input, max_new_tokens=1) - if output[0][-1] == tokenizer.eos_token_id: - break - yield ModelOutputDocument( - token_id=output[0][-1], - generated_text=tokenizer.decode( - output[0][input_len:], skip_special_tokens=True - ), - ) - input = { - 'input_ids': output, - 'attention_mask': torch.ones(1, len(output[0])), - } + input = tokenizer(doc.prompt, return_tensors='pt') + input_len = input['input_ids'].shape[1] + for _ in range(doc.max_tokens): + output = self.model.generate(**input, max_new_tokens=1) + if output[0][-1] == tokenizer.eos_token_id: + break + yield ModelOutputDocument( + token_id=output[0][-1], + generated_text=tokenizer.decode( + output[0][input_len:], skip_special_tokens=True + ), + ) + input = { + 'input_ids': output, + 'attention_mask': torch.ones(1, len(output[0])), + } ``` 4. Serve and use: ```python # Server with Deployment(uses=TokenStreamingExecutor, port=12345, protocol='grpc') as dep: - dep.block() + dep.block() + # Client async def main(): - client = Client(port=12345, protocol='grpc', asyncio=True) - async for doc in client.stream_doc( - on='/stream', - inputs=PromptDocument(prompt='what is the capital of France ?', max_tokens=10), - return_type=ModelOutputDocument, - ): - print(doc.generated_text) + client = Client(port=12345, protocol='grpc', asyncio=True) + async for doc in client.stream_doc( + on='/stream', + inputs=PromptDocument(prompt='what is the capital of France ?', max_tokens=10), + return_type=ModelOutputDocument, + ): + print(doc.generated_text) ``` ## Support diff --git a/conda/meta.yaml b/conda/meta.yaml index 44b5cc5b9f437..b7f175ebfaa9d 100644 --- a/conda/meta.yaml +++ b/conda/meta.yaml @@ -147,7 +147,7 @@ about: license_family: Apache license_file: LICENSE summary: "Build multimodal AI services via cloud native technologies \xB7 Neural Search \xB7 Generative AI \xB7 Cloud Native" - doc_url: https://docs.jina.ai + doc_url: https://jina.ai/serve extra: recipe-maintainers: diff --git a/docs/concepts/jcloud/configuration.md b/docs/concepts/jcloud/configuration.md index 8a859df0ed56e..d00ebb7445958 100644 --- a/docs/concepts/jcloud/configuration.md +++ b/docs/concepts/jcloud/configuration.md @@ -180,7 +180,7 @@ If shards/replicas are used, we will multiply credits further by the number of s ## Scale out Executors -On JCloud, demand-based autoscaling functionality is naturally offered thanks to the underlying Kubernetes architecture. This means that you can maintain [serverless](https://en.wikipedia.org/wiki/Serverless_computing) deployments in a cost-effective way with no headache of setting the [right number of replicas](https://docs.jina.ai/how-to/scale-out/#scale-out-your-executor) anymore! +On JCloud, demand-based autoscaling functionality is naturally offered thanks to the underlying Kubernetes architecture. 
This means that you can maintain [serverless](https://en.wikipedia.org/wiki/Serverless_computing) deployments in a cost-effective way, without the headache of manually setting the [right number of replicas](https://jina.ai/serve/how-to/scale-out/#scale-out-your-executor)!
 
 ### Autoscaling with `jinaai+serverless://`
 
@@ -266,8 +266,8 @@ The JCloud parameters `minAvailable` and `maxUnavailable` ensure that Executors
 
 | Name | Default | Allowed | Description |
 | :--------------- | :-----: | :---------------------------------------------------------------------------------------: | :------------------------------------------------------- |
-| `minAvailable` | N/A | Lower than number of [replicas](https://docs.jina.ai/concepts/flow/scale-out/#scale-out) | Minimum number of replicas available during disruption |
-| `maxUnavailable` | N/A | Lower than numbers of [replicas](https://docs.jina.ai/concepts/flow/scale-out/#scale-out) | Maximum number of replicas unavailable during disruption |
+| `minAvailable` | N/A | Lower than the number of [replicas](https://jina.ai/serve/concepts/flow/scale-out/#scale-out) | Minimum number of replicas available during disruption |
+| `maxUnavailable` | N/A | Lower than the number of [replicas](https://jina.ai/serve/concepts/flow/scale-out/#scale-out) | Maximum number of replicas unavailable during disruption |
 
 ```{code-block} yaml
 ---
@@ -459,7 +459,7 @@ Keys in `labels` have the following restrictions:
 
 ### Monitoring
 
-To enable [tracing support](https://docs.jina.ai/cloud-nativeness/opentelemetry/) in Flows, you can pass `enable: true` argument in the Flow YAML. (Tracing support is not enabled by default in JCloud)
+To enable [tracing support](https://jina.ai/serve/cloud-nativeness/opentelemetry/) in Flows, you can pass the `enable: true` argument in the Flow YAML. (Tracing support is not enabled by default in JCloud)
 
 ```{code-block} yaml
 ---
diff --git a/docs/concepts/jcloud/index.md b/docs/concepts/jcloud/index.md
index 632d25c337b57..593bb94bf14c7 100644
--- a/docs/concepts/jcloud/index.md
+++ b/docs/concepts/jcloud/index.md
@@ -8,7 +8,7 @@ configuration
 ```
 
-```{figure} https://docs.jina.ai/_images/jcloud-banner.png
+```{figure} https://jina.ai/serve/_images/jcloud-banner.png
 :width: 0 %
 :scale: 0 %
 ```
@@ -50,13 +50,13 @@ For the rest of this section, we use `jc` or `jcloud`. But again they are interc
 
 ### Deploy
 
-In Jina's idiom, a project is a [Flow](https://docs.jina.ai/concepts/orchestration/flow/), which represents an end-to-end task such as indexing, searching or recommending. In this document, we use "project" and "Flow" interchangeably.
+In Jina's idiom, a project is a [Flow](https://jina.ai/serve/concepts/orchestration/flow/), which represents an end-to-end task such as indexing, searching or recommending. In this document, we use "project" and "Flow" interchangeably.
 
 A Flow can have two types of file structure: a single YAML file or a project folder.
 
 #### Single YAML file
 
-A self-contained YAML file, consisting of all configuration at the [Flow](https://docs.jina.ai/concepts/orchestration/flow/)-level and [Executor](https://docs.jina.ai/concepts/serving/executor/)-level.
+A self-contained YAML file, consisting of all configuration at the [Flow](https://jina.ai/serve/concepts/orchestration/flow/)-level and [Executor](https://jina.ai/serve/concepts/serving/executor/)-level. 
> All Executors' `uses` must follow the format `jinaai+docker:///MyExecutor` (from [Executor Hub](https://cloud.jina.ai)) to avoid any local file dependencies: @@ -123,7 +123,7 @@ hello/ Where: - `hello/` is your top-level project folder. -- `executor1` directory has all Executor related code/configuration. You can read the best practices for [file structures](https://docs.jina.ai/concepts/serving/executor/file-structure/). Multiple Executor directories can be created. +- `executor1` directory has all Executor related code/configuration. You can read the best practices for [file structures](https://jina.ai/serve/concepts/serving/executor/file-structure/). Multiple Executor directories can be created. - `flow.yml` Your Flow YAML. - `.env` All environment variables used during deployment. @@ -374,7 +374,7 @@ jc secret create mysecret rich-husky-af14064067 --from-literal "{'env-name': 'se ``` ```{tip} -You can optionally pass the `--update` flag to automatically update the Flow spec with the updated secret information. This flag will update the Flow which is hosted on the cloud. Finally, you can also optionally pass a Flow's yaml file path with `--path` to update the yaml file locally. Refer to [this](https://docs.jina.ai/cloud-nativeness/kubernetes/#deploy-flow-with-custom-environment-variables-and-secrets) section for more information. +You can optionally pass the `--update` flag to automatically update the Flow spec with the updated secret information. This flag will update the Flow which is hosted on the cloud. Finally, you can also optionally pass a Flow's yaml file path with `--path` to update the yaml file locally. Refer to [this](https://jina.ai/serve/cloud-nativeness/kubernetes/#deploy-flow-with-custom-environment-variables-and-secrets) section for more information. ``` ```{caution} @@ -419,7 +419,7 @@ jc secret update rich-husky-af14064067 mysecret --from-literal "{'env-name': 'se ``` ```{tip} -You can optionally pass the `--update` flag to automatically update the Flow spec with the updated secret information. This flag will update the Flow which is hosted on the cloud. Finally, you can also optionally pass a Flow's yaml file path with `--path` to update the yaml file locally. Refer to [this](https://docs.jina.ai/cloud-nativeness/kubernetes/#deploy-flow-with-custom-environment-variables-and-secrets) section for more information. +You can optionally pass the `--update` flag to automatically update the Flow spec with the updated secret information. This flag will update the Flow which is hosted on the cloud. Finally, you can also optionally pass a Flow's yaml file path with `--path` to update the yaml file locally. Refer to [this](https://jina.ai/serve/cloud-nativeness/kubernetes/#deploy-flow-with-custom-environment-variables-and-secrets) section for more information. ``` ```{caution} @@ -498,7 +498,7 @@ jcloud: #### Single YAML file -A self-contained YAML file, consisting of all configuration information at the [Deployment](https://docs.jina.ai/concepts/orchestration/deployment/)-level and [Executor](https://docs.jina.ai/concepts/serving/executor/)-level. +A self-contained YAML file, consisting of all configuration information at the [Deployment](https://jina.ai/serve/concepts/orchestration/deployment/)-level and [Executor](https://jina.ai/serve/concepts/serving/executor/)-level. 
> A Deployment's `uses` parameter must follow the format `jinaai+docker:///MyExecutor` (from [Executor Hub](https://cloud.jina.ai)) to avoid any local file dependencies:
 
diff --git a/docs/concepts/orchestration/flow.md b/docs/concepts/orchestration/flow.md
index 702779f38ad97..e08779edf8366 100644
--- a/docs/concepts/orchestration/flow.md
+++ b/docs/concepts/orchestration/flow.md
@@ -303,7 +303,7 @@ Please follow the walkthrough and enjoy the free GPU/TPU!
 
 
 ```{tip}
-Hosing services on Google Colab is not recommended if your server aims to be long-lived or permanent. It is often used for quick experiments, demonstrations or leveraging its free GPU/TPU. For stable, secure and free hosting of your Flow, check out [JCloud](https://docs.jina.ai/concepts/jcloud/).
+Hosting services on Google Colab is not recommended if your server aims to be long-lived or permanent. It is often used for quick experiments, demonstrations or leveraging its free GPU/TPU. For stable, secure and free hosting of your Flow, check out [JCloud](https://jina.ai/serve/concepts/jcloud/).
 ```
 
 ## Export
diff --git a/docs/conf.py b/docs/conf.py
index c0b3d86e09330..b9c9b92396825 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -49,7 +49,7 @@
 html_theme = 'furo'
 
 base_url = '/'
-html_baseurl = 'https://docs.jina.ai'
+html_baseurl = 'https://jina.ai/serve'
 sitemap_url_scheme = '{link}'
 sitemap_locales = [None]
 sitemap_filename = "sitemap.xml"
@@ -167,8 +167,8 @@
 linkcheck_retries = 2
 linkcheck_anchors = False
 
-ogp_site_url = 'https://docs.jina.ai/'
-ogp_image = 'https://docs.jina.ai/_static/banner.png'
+ogp_site_url = 'https://jina.ai/serve/'
+ogp_image = 'https://jina.ai/serve/_static/banner.png'
 ogp_use_first_image = True
 ogp_description_length = 300
 ogp_type = 'website'
diff --git a/docs/html_extra/robots.txt b/docs/html_extra/robots.txt
index 114da90890875..401936ca662c2 100644
--- a/docs/html_extra/robots.txt
+++ b/docs/html_extra/robots.txt
@@ -1,2 +1,2 @@
 User-agent: *
-sitemap: https://docs.jina.ai/sitemap.xml
\ No newline at end of file
+sitemap: https://jina.ai/serve/sitemap.xml
\ No newline at end of file
diff --git a/docs/tutorials/deploy-model.md b/docs/tutorials/deploy-model.md
index 54ef22e762b2f..561735798c959 100644
--- a/docs/tutorials/deploy-model.md
+++ b/docs/tutorials/deploy-model.md
@@ -42,7 +42,7 @@ When you build a model or service in Jina-serve, it's always in the form of an E
 
 In this example we need to install:
 
-- The [Jina-serve framework](https://docs.jina.ai/) itself
+- The [Jina-serve framework](https://jina.ai/serve/) itself
 - The dependencies of the specific model we want to serve and deploy
 
 ```shell
diff --git a/jina/orchestrate/deployments/__init__.py b/jina/orchestrate/deployments/__init__.py
index 3eecfd3606aab..1676868db2cae 100644
--- a/jina/orchestrate/deployments/__init__.py
+++ b/jina/orchestrate/deployments/__init__.py
@@ -394,7 +394,7 @@ def __init__(
 Note that the recommended way is to only import a single module - a simple python file, if your executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files, which should be structured as a python package. For more details, please see the
-`Executor cookbook `__
+`Executor cookbook `__
 :param quiet: If set, then no log will be emitted from this object.
 :param quiet_error: If set, then exception stack information will not be added to the log
 :param raft_configuration: Dictionary of kwargs arguments that will be passed to the RAFT node as configuration options when starting the RAFT node. 
@@ -402,7 +402,7 @@ def __init__( :param replicas: The number of replicas in the deployment :param retries: Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas) :param runtime_cls: The runtime class to run inside the Pod - :param shards: The number of shards in the deployment running at the same time. For more details check https://docs.jina.ai/concepts/flow/create-flow/#complex-flow-topologies + :param shards: The number of shards in the deployment running at the same time. For more details check https://jina.ai/serve/concepts/flow/create-flow/#complex-flow-topologies :param ssl_certfile: the path to the certificate file :param ssl_keyfile: the path to the key file :param stateful: If set, start consensus module to make sure write operations are properly replicated between all the replicas diff --git a/jina/orchestrate/flow/base.py b/jina/orchestrate/flow/base.py index 0b0a36d47b3c7..c5bbc5919b8ab 100644 --- a/jina/orchestrate/flow/base.py +++ b/jina/orchestrate/flow/base.py @@ -39,12 +39,7 @@ from jina.clients import Client from jina.clients.mixin import AsyncPostMixin, HealthCheckMixin, PostMixin, ProfileMixin from jina.constants import __default_host__, __windows__ -from jina.enums import ( - DeploymentRoleType, - FlowBuildLevel, - FlowInspectType, - ProtocolType, -) +from jina.enums import DeploymentRoleType, FlowBuildLevel, FlowInspectType, ProtocolType from jina.excepts import ( FlowMissingDeploymentError, FlowTopologyError, @@ -985,7 +980,7 @@ def add( Note that the recommended way is to only import a single module - a simple python file, if your executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files, which should be structured as a python package. For more details, please see the - `Executor cookbook `__ + `Executor cookbook `__ :param quiet: If set, then no log will be emitted from this object. :param quiet_error: If set, then exception stack information will not be added to the log :param raft_configuration: Dictionary of kwargs arguments that will be passed to the RAFT node as configuration options when starting the RAFT node. @@ -993,7 +988,7 @@ def add( :param replicas: The number of replicas in the deployment :param retries: Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas) :param runtime_cls: The runtime class to run inside the Pod - :param shards: The number of shards in the deployment running at the same time. For more details check https://docs.jina.ai/concepts/flow/create-flow/#complex-flow-topologies + :param shards: The number of shards in the deployment running at the same time. For more details check https://jina.ai/serve/concepts/flow/create-flow/#complex-flow-topologies :param ssl_certfile: the path to the certificate file :param ssl_keyfile: the path to the key file :param stateful: If set, start consensus module to make sure write operations are properly replicated between all the replicas @@ -1149,7 +1144,7 @@ def add( Note that the recommended way is to only import a single module - a simple python file, if your executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files, which should be structured as a python package. For more details, please see the - `Executor cookbook `__ + `Executor cookbook `__ :param quiet: If set, then no log will be emitted from this object. 
:param quiet_error: If set, then exception stack information will not be added to the log
 :param raft_configuration: Dictionary of kwargs arguments that will be passed to the RAFT node as configuration options when starting the RAFT node.
@@ -1157,7 +1152,7 @@ def add(
 :param replicas: The number of replicas in the deployment
 :param retries: Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas)
 :param runtime_cls: The runtime class to run inside the Pod
-:param shards: The number of shards in the deployment running at the same time. For more details check https://docs.jina.ai/concepts/flow/create-flow/#complex-flow-topologies
+:param shards: The number of shards in the deployment running at the same time. For more details check https://jina.ai/serve/concepts/flow/create-flow/#complex-flow-topologies
 :param ssl_certfile: the path to the certificate file
 :param ssl_keyfile: the path to the key file
 :param stateful: If set, start consensus module to make sure write operations are properly replicated between all the replicas
@@ -1781,8 +1776,10 @@ def build(self, copy_flow: bool = False, **kwargs) -> 'Flow':
         op_flow._deployment_nodes[GATEWAY_NAME].args.graph_description = json.dumps(
             op_flow._get_graph_representation()
         )
-        op_flow._deployment_nodes[GATEWAY_NAME].args.deployments_addresses = (
-            json.dumps(op_flow._get_deployments_addresses())
+        op_flow._deployment_nodes[
+            GATEWAY_NAME
+        ].args.deployments_addresses = json.dumps(
+            op_flow._get_deployments_addresses()
         )
 
         op_flow._deployment_nodes[GATEWAY_NAME].update_pod_args()
diff --git a/jina/parsers/base.py b/jina/parsers/base.py
index 31653575b88e0..0df8b09f0bffc 100644
--- a/jina/parsers/base.py
+++ b/jina/parsers/base.py
@@ -16,7 +16,7 @@ def set_base_parser():
     # create the top-level parser
     urls = {
         'Code': ('💻', 'https://oss.jina.ai'),
-        'Docs': ('📖', 'https://docs.jina.ai'),
+        'Docs': ('📖', 'https://jina.ai/serve'),
         'Help': ('💬', 'https://discord.jina.ai'),
         'Hiring!': ('🙌', 'https://jobs.jina.ai'),
     }
diff --git a/jina/parsers/orchestrate/base.py b/jina/parsers/orchestrate/base.py
index 91bac5aba1dda..6c821dee283b8 100644
--- a/jina/parsers/orchestrate/base.py
+++ b/jina/parsers/orchestrate/base.py
@@ -151,7 +151,7 @@ def mixin_scalable_deployment_parser(parser, default_name=None):
         type=int,
         default=1,
         help='The number of shards in the deployment running at the same time. For more details check '
-        'https://docs.jina.ai/concepts/flow/create-flow/#complex-flow-topologies',
+        'https://jina.ai/serve/concepts/flow/create-flow/#complex-flow-topologies',
     )
 
     gp.add_argument(
diff --git a/jina/parsers/orchestrate/runtimes/worker.py b/jina/parsers/orchestrate/runtimes/worker.py
index 07645fd9959e0..f6da034456fb4 100644
--- a/jina/parsers/orchestrate/runtimes/worker.py
+++ b/jina/parsers/orchestrate/runtimes/worker.py
@@ -83,7 +83,7 @@ def mixin_worker_runtime_parser(parser):
 Note that the recommended way is to only import a single module - a simple python file, if your executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files, which should be structured as a python package. 
For more details, please see the -`Executor cookbook `__ +`Executor cookbook `__ ''', ) diff --git a/jina/proto/docarray_v1/build-proto.sh b/jina/proto/docarray_v1/build-proto.sh index bdee4a25fb771..b8aa3cf9aecef 100755 --- a/jina/proto/docarray_v1/build-proto.sh +++ b/jina/proto/docarray_v1/build-proto.sh @@ -29,7 +29,7 @@ VER_FILE=../__init__.py if [ "$#" -ne 1 ] && [ "$#" -ne 2 ]; then echo "Error: Please specify the [PATH_TO_GRPC_PYTHON_PLUGIN], refer more details at " \ - "https://docs.jina.ai/" + "https://jina.ai/serve/" printf "\n" echo "USAGE:" printf "\t" diff --git a/jina/proto/docarray_v2/build-proto.sh b/jina/proto/docarray_v2/build-proto.sh index 9e286ecca9b9e..06638f41d84ed 100755 --- a/jina/proto/docarray_v2/build-proto.sh +++ b/jina/proto/docarray_v2/build-proto.sh @@ -29,7 +29,7 @@ VER_FILE=../__init__.py if [ "$#" -ne 1 ] && [ "$#" -ne 2 ]; then echo "Error: Please specify the [PATH_TO_GRPC_PYTHON_PLUGIN], refer more details at " \ - "https://docs.jina.ai/" + "https://jina.ai/serve/" printf "\n" echo "USAGE:" printf "\t" diff --git a/jina/serve/consensus/add_voter/build-add-voter-proto.sh b/jina/serve/consensus/add_voter/build-add-voter-proto.sh index 388e4344cd5a7..48d18867e6bac 100644 --- a/jina/serve/consensus/add_voter/build-add-voter-proto.sh +++ b/jina/serve/consensus/add_voter/build-add-voter-proto.sh @@ -24,7 +24,7 @@ OUT_FOLDER="${PB_NAME}/" if [ "$#" -ne 1 ] && [ "$#" -ne 2 ]; then echo "Error: Please specify the [PATH_TO_GRPC_PYTHON_PLUGIN], refer more details at " \ - "https://docs.jina.ai/" + "https://jina.ai/serve/" printf "\n" echo "USAGE:" printf "\t" diff --git a/jina/serve/executors/__init__.py b/jina/serve/executors/__init__.py index 47ee7d6c22ffd..f3ee7ebbc2666 100644 --- a/jina/serve/executors/__init__.py +++ b/jina/serve/executors/__init__.py @@ -401,10 +401,10 @@ def __init__( self._init_monitoring() self._init_workspace = workspace if __dry_run_endpoint__ not in self.requests: - self.requests[__dry_run_endpoint__] = ( - _FunctionWithSchema.get_function_with_schema( - self.__class__._dry_run_func - ) + self.requests[ + __dry_run_endpoint__ + ] = _FunctionWithSchema.get_function_with_schema( + self.__class__._dry_run_func ) else: self.logger.warning( @@ -412,10 +412,10 @@ def __init__( f' So it is recommended not to expose this endpoint. 
' ) if type(self) == BaseExecutor: - self.requests[__default_endpoint__] = ( - _FunctionWithSchema.get_function_with_schema( - self.__class__._dry_run_func - ) + self.requests[ + __default_endpoint__ + ] = _FunctionWithSchema.get_function_with_schema( + self.__class__._dry_run_func ) self._lock = contextlib.AsyncExitStack() @@ -595,14 +595,14 @@ def _add_requests(self, _requests: Optional[Dict]): _func = getattr(self.__class__, func) if callable(_func): # the target function is not decorated with `@requests` yet - self.requests[endpoint] = ( - _FunctionWithSchema.get_function_with_schema(_func) - ) + self.requests[ + endpoint + ] = _FunctionWithSchema.get_function_with_schema(_func) elif typename(_func) == 'jina.executors.decorators.FunctionMapper': # the target function is already decorated with `@requests`, need unwrap with `.fn` - self.requests[endpoint] = ( - _FunctionWithSchema.get_function_with_schema(_func.fn) - ) + self.requests[ + endpoint + ] = _FunctionWithSchema.get_function_with_schema(_func.fn) else: raise TypeError( f'expect {typename(self)}.{func} to be a function, but receiving {typename(_func)}' @@ -1134,7 +1134,7 @@ def serve( Note that the recommended way is to only import a single module - a simple python file, if your executor can be defined in a single file, or an ``__init__.py`` file if you have multiple files, which should be structured as a python package. For more details, please see the - `Executor cookbook `__ + `Executor cookbook `__ :param quiet: If set, then no log will be emitted from this object. :param quiet_error: If set, then exception stack information will not be added to the log :param raft_configuration: Dictionary of kwargs arguments that will be passed to the RAFT node as configuration options when starting the RAFT node. @@ -1142,7 +1142,7 @@ def serve( :param replicas: The number of replicas in the deployment :param retries: Number of retries per gRPC call. If <0 it defaults to max(3, num_replicas) :param runtime_cls: The runtime class to run inside the Pod - :param shards: The number of shards in the deployment running at the same time. For more details check https://docs.jina.ai/concepts/flow/create-flow/#complex-flow-topologies + :param shards: The number of shards in the deployment running at the same time. For more details check https://jina.ai/serve/concepts/flow/create-flow/#complex-flow-topologies :param ssl_certfile: the path to the certificate file :param ssl_keyfile: the path to the key file :param stateful: If set, start consensus module to make sure write operations are properly replicated between all the replicas @@ -1213,12 +1213,12 @@ def serve( to main thread. :param uses_dynamic_batching: dictionary of parameters to overwrite from the default config's dynamic_batching field :param reload: a flag indicating if the Executor should watch the Python files of its implementation to reload the code live while serving. - :param kwargs: other kwargs accepted by the Flow, full list can be found `here ` + :param kwargs: other kwargs accepted by the Flow, full list can be found `here ` """ warnings.warn( f'Executor.serve() is no more supported and will be deprecated soon. 
Use Deployment to serve an Executor instead: ' - f'https://docs.jina.ai/concepts/executor/serve/', + f'https://jina.ai/serve/concepts/executor/serve/', DeprecationWarning, ) from jina.orchestrate.deployments import Deployment @@ -1271,11 +1271,11 @@ def to_kubernetes_yaml( :param uses_metas: dictionary of parameters to overwrite from the default config's metas field :param uses_requests: dictionary of parameters to overwrite from the default config's requests field :param uses_dynamic_batching: dictionary of parameters to overwrite from the default config's dynamic_batching field - :param kwargs: other kwargs accepted by the Flow, full list can be found `here ` + :param kwargs: other kwargs accepted by the Flow, full list can be found `here ` """ warnings.warn( f'Executor.to_kubernetes_yaml() is no more supported and will be deprecated soon. Use Deployment to export kubernetes YAML files: ' - f'https://docs.jina.ai/concepts/executor/serve/#serve-via-kubernetes', + f'https://jina.ai/serve/concepts/executor/serve/#serve-via-kubernetes', DeprecationWarning, ) from jina.orchestrate.flow.base import Flow @@ -1319,12 +1319,12 @@ def to_docker_compose_yaml( :param uses_metas: dictionary of parameters to overwrite from the default config's metas field :param uses_requests: dictionary of parameters to overwrite from the default config's requests field :param uses_dynamic_batching: dictionary of parameters to overwrite from the default config's requests field - :param kwargs: other kwargs accepted by the Flow, full list can be found `here ` + :param kwargs: other kwargs accepted by the Flow, full list can be found `here ` """ warnings.warn( f'Executor.to_docker_compose_yaml() is no more supported and will be deprecated soon. Use Deployment to export docker compose YAML files: ' - f'https://docs.jina.ai/concepts/executor/serve/#serve-via-docker-compose', + f'https://jina.ai/serve/concepts/executor/serve/#serve-via-docker-compose', DeprecationWarning, ) diff --git a/jina/serve/runtimes/gateway/models.py b/jina/serve/runtimes/gateway/models.py index 164c1113c7e00..a85a9a7eb2818 100644 --- a/jina/serve/runtimes/gateway/models.py +++ b/jina/serve/runtimes/gateway/models.py @@ -3,11 +3,12 @@ from enum import Enum from types import SimpleNamespace from typing import TYPE_CHECKING, Callable, Dict, List, Optional, Union + from google.protobuf.descriptor import Descriptor, FieldDescriptor from pydantic import BaseConfig, BaseModel, Field, create_model, root_validator -from jina.proto.jina_pb2 import DataRequestProto, JinaInfoProto, RouteProto, StatusProto from jina._docarray import docarray_v2 +from jina.proto.jina_pb2 import DataRequestProto, JinaInfoProto, RouteProto, StatusProto if TYPE_CHECKING: # pragma: no cover from google.protobuf.pyext.cpp_message import GeneratedProtocolMessageType @@ -249,7 +250,7 @@ class JinaRequestModel(BaseModel): None, example=[ {'text': 'hello, world!'}, - {'uri': 'https://docs.jina.ai/_static/logo-light.svg'}, + {'uri': 'https://jina.ai/serve/_static/logo-light.svg'}, ], description=DESCRIPTION_DATA, ) diff --git a/jina_cli/export.py b/jina_cli/export.py index 0844f97757bb5..e39ec1cb6fafe 100644 --- a/jina_cli/export.py +++ b/jina_cli/export.py @@ -26,7 +26,7 @@ def api_to_dict(show_all_args: bool = False): 'source': 'https://github.com/jina-ai/jina/tree/' + os.environ.get('JINA_VCS_VERSION', 'master'), 'url': 'https://jina.ai', - 'docs': 'https://docs.jina.ai', + 'docs': 'https://jina.ai/serve', 'authors': 'dev-team@jina.ai', 'version': __version__, 'methods': [], diff 
--git a/scripts/create-conda-recipe.py b/scripts/create-conda-recipe.py
index bbe141364879d..5553f1ec64750 100644
--- a/scripts/create-conda-recipe.py
+++ b/scripts/create-conda-recipe.py
@@ -183,7 +183,7 @@ def increase_indent(self, flow=False, *args, **kwargs):
         'license_family': 'Apache',
         'license_file': 'LICENSE',
         'summary': 'Build multimodal AI services via cloud native technologies · Neural Search · Generative AI · Cloud Native',
-        'doc_url': 'https://docs.jina.ai',
+        'doc_url': 'https://jina.ai/serve',
     },
     'extra': {
         'recipe-maintainers': ['JoanFM', 'nan-wang', 'hanxiao'],
diff --git a/setup.py b/setup.py
index ea62c8c6b36f0..93cd427075440 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,7 @@
 import os
+import platform
 import subprocess
 import sys
-import platform
 from os import path
 
 from setuptools import Extension, find_packages, setup
@@ -14,7 +14,7 @@
 LICENSE = 'Apache 2.0'
 GITHUB_REPO = 'https://github.com/jina-ai/jina/'
 DOWNLOAD_URL = 'https://github.com/jina-ai/jina/tags'
-DOCUMENTATION = 'https://docs.jina.ai'
+DOCUMENTATION = 'https://jina.ai/serve'
 TRACKER = 'https://github.com/jina-ai/jina/issues'
 
 if sys.version_info < (3, 7, 0):
@@ -144,10 +144,19 @@ def get_extra_requires(path, add_all=True):
 standard_deps = all_deps['standard'].union(core_deps).union(perf_deps)
 
 # uvloop is not supported on windows
-perf_deps = {i + ";platform_system!='Windows'" if i.startswith('uvloop') else i for i in perf_deps}
-standard_deps = {i + ";platform_system!='Windows'" if i.startswith('uvloop') else i for i in standard_deps}
+perf_deps = {
+    i + ";platform_system!='Windows'" if i.startswith('uvloop') else i
+    for i in perf_deps
+}
+standard_deps = {
+    i + ";platform_system!='Windows'" if i.startswith('uvloop') else i
+    for i in standard_deps
+}
 for k in ['all', 'devel', 'cicd']:
-    all_deps[k] = {i + ";platform_system!='Windows'" if i.startswith('uvloop') else i for i in all_deps[k]}
+    all_deps[k] = {
+        i + ";platform_system!='Windows'" if i.startswith('uvloop') else i
+        for i in all_deps[k]
+    }
 
 # by default, final deps is the standard deps, unless specified by env otherwise
 final_deps = standard_deps
@@ -244,6 +253,6 @@ def get_extra_requires(path, add_all=True):
         'Tracker': TRACKER,
     },
     keywords='jina cloud-native cross-modal multimodal neural-search query search index elastic neural-network encoding '
-             'embedding serving docker container image video audio deep-learning mlops',
+    'embedding serving docker container image video audio deep-learning mlops',
     **extra_golang_kw,
 )
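
Taken together, the hunks above amount to one mechanical substitution: every occurrence of the old documentation base URL `https://docs.jina.ai` becomes `https://jina.ai/serve`, across YAML configs, JSON payloads, Markdown docs, Python docstrings, shell scripts, and packaging metadata. The following is a minimal sketch (a hypothetical helper, not part of this patch) of how a reviewer might reproduce or verify such a sweep; it assumes it is run from the repository root and that the result is inspected with `git diff` before committing:

```python
# migrate_docs_url.py -- hypothetical verification sketch, not part of this patch.
# Rewrites the old docs base URL to the new one across text files in the repo,
# mirroring the substitution performed by the hunks above.
import pathlib

OLD = 'https://docs.jina.ai'
NEW = 'https://jina.ai/serve'

# File types touched by this patch: YAML, JSON, Markdown, Python, shell, text, Dockerfiles.
SUFFIXES = {'.yml', '.yaml', '.json', '.md', '.py', '.sh', '.txt', '.Dockerfile'}

for path in pathlib.Path('.').rglob('*'):
    # Skip directories, git internals, and file types the patch never touches.
    if not path.is_file() or path.suffix not in SUFFIXES or '.git' in path.parts:
        continue
    text = path.read_text(encoding='utf-8')
    if OLD in text:
        # Plain substring replacement keeps trailing paths intact, e.g.
        # https://docs.jina.ai/concepts/... -> https://jina.ai/serve/concepts/...
        path.write_text(text.replace(OLD, NEW), encoding='utf-8')
```

Anchoring the pattern on the full scheme-plus-host keeps unrelated hosts such as `docs2.jina.ai` untouched; note, however, that the CHANGELOG hunk shows the real sweep also caught the bare host `docs.jina.ai` without a scheme, which this sketch would miss.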