From 072770e0e28ccc9611c4cfefefbc200ed1598e97 Mon Sep 17 00:00:00 2001
From: Huzaif ansari <78638431+huzaifansari54@users.noreply.github.com>
Date: Tue, 12 Nov 2024 22:28:40 +0530
Subject: [PATCH] Implement basic Hugging Face LLM inference endpoint

---
 .../langchain_huggingface_example.dart      |  22 +++-
 .../lib/langchain_huggingface.dart          |   2 +
 .../lib/src/llm/huggingface_inference.dart  |  53 ++++++++
 .../lib/src/llm/llm.dart                    |   2 +
 .../lib/src/llm/mappers.dart                |  15 +++
 .../lib/src/llm/types.dart                  | 124 ++++++++++++++++++
 packages/langchain_huggingface/pubspec.yaml |   3 +
 7 files changed, 219 insertions(+), 2 deletions(-)
 create mode 100644 packages/langchain_huggingface/lib/src/llm/huggingface_inference.dart
 create mode 100644 packages/langchain_huggingface/lib/src/llm/llm.dart
 create mode 100644 packages/langchain_huggingface/lib/src/llm/mappers.dart
 create mode 100644 packages/langchain_huggingface/lib/src/llm/types.dart

diff --git a/packages/langchain_huggingface/example/langchain_huggingface_example.dart b/packages/langchain_huggingface/example/langchain_huggingface_example.dart
index 21f3e9f2..7189c018 100644
--- a/packages/langchain_huggingface/example/langchain_huggingface_example.dart
+++ b/packages/langchain_huggingface/example/langchain_huggingface_example.dart
@@ -1,3 +1,21 @@
-void main() {
-  // TODO
+// ignore_for_file: avoid_print
+
+import 'package:langchain_huggingface/langchain_huggingface.dart';
+
+void main() async {
+  await _example1();
+}
+
+/// The most basic building block of LangChain is calling an LLM on some input.
+Future<void> _example1() async {
+  final huggingFace = HuggingfaceInference(
+    model: 'gpt2',
+    apiKey: '...Hugging-face-api-key',
+  );
+  final result = await huggingFace('Who are you?');
+  print(result);
 }
diff --git a/packages/langchain_huggingface/lib/langchain_huggingface.dart b/packages/langchain_huggingface/lib/langchain_huggingface.dart
index 3f6c9ef0..6a0ab625 100644
--- a/packages/langchain_huggingface/lib/langchain_huggingface.dart
+++ b/packages/langchain_huggingface/lib/langchain_huggingface.dart
@@ -1,2 +1,4 @@
 /// Hugging Face module for LangChain.dart.
 library;
+
+export 'src/llm/llm.dart';
diff --git a/packages/langchain_huggingface/lib/src/llm/huggingface_inference.dart b/packages/langchain_huggingface/lib/src/llm/huggingface_inference.dart
new file mode 100644
index 00000000..b0d4fe16
--- /dev/null
+++ b/packages/langchain_huggingface/lib/src/llm/huggingface_inference.dart
@@ -0,0 +1,53 @@
+import 'package:huggingface_client/huggingface_client.dart';
+import 'package:langchain_core/llms.dart';
+import 'package:langchain_core/prompts.dart';
+import 'package:meta/meta.dart';
+
+import 'mappers.dart';
+import 'types.dart';
+
+/// Wrapper around the Hugging Face Inference API text-generation task.
+@immutable
+class HuggingfaceInference extends BaseLLM<HuggingFaceOptions> {
+  HuggingfaceInference({
+    required this.model,
+    required this.apiKey,
+    super.defaultOptions = const HuggingFaceOptions(),
+  }) : _apiClient = InferenceApi(_getClient(apiKey));
+
+  final InferenceApi _apiClient;
+  final String apiKey;
+  final String model;
+
+  @override
+  String get modelType => 'llm';
+
+  @override
+  Future<LLMResult> invoke(
+    PromptValue input, {
+    HuggingFaceOptions? options,
+  }) async {
+    // Per-call options take precedence over the default options.
+    final parameters = ApiQueryNLPTextGeneration(
+      inputs: input.toString(),
+      temperature: options?.temperature ?? defaultOptions.temperature ?? 1.0,
+      topK: options?.topK ?? defaultOptions.topK ?? 0,
+      topP: options?.topP ?? defaultOptions.topP ?? 0.0,
+      maxTime: options?.maxTime ?? defaultOptions.maxTime ?? -1.0,
+      returnFullText:
+          options?.returnFullText ?? defaultOptions.returnFullText ?? true,
+      repetitionPenalty:
+          options?.repetitionPenalty ?? defaultOptions.repetitionPenalty ?? -1,
+      doSample: options?.doSample ?? defaultOptions.doSample ?? true,
+      maxNewTokens: options?.maxNewTokens ?? defaultOptions.maxNewTokens ?? -1,
+      options: InferenceOptions(
+        useCache: options?.useCache ?? defaultOptions.useCache ?? true,
+        waitForModel:
+            options?.waitForModel ?? defaultOptions.waitForModel ?? false,
+      ),
+    );
+    final result = await _apiClient.queryNLPTextGeneration(
+      taskParameters: parameters,
+      model: model,
+    );
+    return result![0]!.toLLMResult();
+  }
+
+  @override
+  Future<List<int>> tokenize(
+    PromptValue promptValue, {
+    HuggingFaceOptions? options,
+  }) async {
+    // TODO: implement tokenize
+    throw UnimplementedError();
+  }
+
+  static InferenceApiClient _getClient(String apiKey) =>
+      HuggingFaceClient.getInferenceClient(
+        apiKey,
+        HuggingFaceClient.inferenceBasePath,
+      );
+}
diff --git a/packages/langchain_huggingface/lib/src/llm/llm.dart b/packages/langchain_huggingface/lib/src/llm/llm.dart
new file mode 100644
index 00000000..a5022ed2
--- /dev/null
+++ b/packages/langchain_huggingface/lib/src/llm/llm.dart
@@ -0,0 +1,2 @@
+export 'huggingface_inference.dart';
+export 'types.dart';
diff --git a/packages/langchain_huggingface/lib/src/llm/mappers.dart b/packages/langchain_huggingface/lib/src/llm/mappers.dart
new file mode 100644
index 00000000..38eaf6d9
--- /dev/null
+++ b/packages/langchain_huggingface/lib/src/llm/mappers.dart
@@ -0,0 +1,15 @@
+import 'package:huggingface_client/huggingface_client.dart';
+import 'package:langchain_core/language_models.dart';
+import 'package:langchain_core/llms.dart';
+
+/// Maps a Hugging Face [ApiResponseNLPTextGeneration] to a LangChain
+/// [LLMResult].
+extension HuggingFaceResponseMapper on ApiResponseNLPTextGeneration {
+  LLMResult toLLMResult() {
+    return LLMResult(
+      id: 'id',
+      output: generatedText,
+      finishReason: FinishReason.unspecified,
+      metadata: const {},
+      usage: const LanguageModelUsage(),
+    );
+  }
+}
diff --git a/packages/langchain_huggingface/lib/src/llm/types.dart b/packages/langchain_huggingface/lib/src/llm/types.dart
new file mode 100644
index 00000000..cb74d1dd
--- /dev/null
+++ b/packages/langchain_huggingface/lib/src/llm/types.dart
@@ -0,0 +1,124 @@
+import 'package:langchain_core/llms.dart';
+import 'package:meta/meta.dart';
+
+/// Options to pass to the Hugging Face Inference API text-generation task.
+@immutable
+class HuggingFaceOptions extends LLMOptions {
+  const HuggingFaceOptions({
+    this.topK,
+    this.topP,
+    this.temperature,
+    this.repetitionPenalty,
+    this.maxNewTokens,
+    this.maxTime,
+    this.returnFullText,
+    this.numReturnSequences,
+    this.useCache,
+    this.waitForModel,
+    this.doSample,
+    super.model,
+    super.concurrencyLimit,
+  });
+
+  /// (Default: true). There is a cache layer on the Inference API to speed up
+  /// requests we have already seen. Most models can use those results as-is,
+  /// as models are deterministic (meaning the results will be the same
+  /// anyway). However, if you use a non-deterministic model, you can set this
+  /// parameter to prevent the caching mechanism from being used, resulting in
+  /// a real new query.
+  final bool? useCache;
+
+  /// (Default: false). If the model is not ready, wait for it instead of
+  /// receiving a 503. It limits the number of requests required to get your
+  /// inference done. It is advised to only set this flag to true after
+  /// receiving a 503 error, as it will limit hanging in your application to
+  /// known places.
+  final bool? waitForModel;
+
+  /// (Default: none). Integer to define the top tokens considered within the
+  /// sample operation to create new text.
+  final int? topK;
+
+  /// (Default: none). Float to define the tokens that are within the sample
+  /// operation of text generation. Add tokens in the sample from most
+  /// probable to least probable until the sum of the probabilities is greater
+  /// than top_p.
+  final double? topP;
+
+  /// (Default: 1.0). Float (0.0-100.0). The temperature of the sampling
+  /// operation. 1 means regular sampling, 0 means always take the highest
+  /// score, 100.0 is getting closer to uniform probability.
+  final double? temperature;
+
+  /// (Default: none). Float (0.0-100.0). The more a token is used within
+  /// generation, the more it is penalized so that it is not picked in
+  /// successive generation passes.
+  final double? repetitionPenalty;
+
+  /// (Default: none). Int (0-250). The amount of new tokens to be generated.
+  /// This does not include the input length; it is an estimate of the size of
+  /// the generated text you want. Each new token slows down the request, so
+  /// look for a balance between response time and length of generated text.
+  final int? maxNewTokens;
+
+  /// (Default: none). Float (0-120.0). The maximum amount of time, in
+  /// seconds, that the query should take. The network can cause some
+  /// overhead, so it is a soft limit. Use it in combination with
+  /// [maxNewTokens] for best results.
+  final double? maxTime;
+
+  /// (Default: true). If set to false, the returned results will not contain
+  /// the original query, making it easier for prompting.
+  final bool? returnFullText;
+
+  /// (Default: 1). The number of propositions you want to be returned.
+  final int? numReturnSequences;
+
+  /// (Default: true). Whether or not to use sampling; use greedy decoding
+  /// otherwise.
+  final bool? doSample;
+
+  @override
+  HuggingFaceOptions copyWith({
+    final String? model,
+    final int? concurrencyLimit,
+    final int? topK,
+    final double? topP,
+    final double? temperature,
+    final double? repetitionPenalty,
+    final int? maxNewTokens,
+    final double? maxTime,
+    final bool? returnFullText,
+    final int? numReturnSequences,
+    final bool? useCache,
+    final bool? waitForModel,
+    final bool? doSample,
+  }) {
+    return HuggingFaceOptions(
+      model: model ?? this.model,
+      concurrencyLimit: concurrencyLimit ?? this.concurrencyLimit,
+      topK: topK ?? this.topK,
+      topP: topP ?? this.topP,
+      temperature: temperature ?? this.temperature,
+      repetitionPenalty: repetitionPenalty ?? this.repetitionPenalty,
+      maxNewTokens: maxNewTokens ?? this.maxNewTokens,
+      maxTime: maxTime ?? this.maxTime,
+      returnFullText: returnFullText ?? this.returnFullText,
+      numReturnSequences: numReturnSequences ?? this.numReturnSequences,
+      useCache: useCache ?? this.useCache,
+      waitForModel: waitForModel ?? this.waitForModel,
+      doSample: doSample ?? this.doSample,
+    );
+  }
+}
+
+// Full Hugging Face text-generation endpoint parameters, kept here for
+// reference (not all of them are mapped to [HuggingFaceOptions] yet):
+//
+// inputs* string
+// parameters object
+//   adapter_id string LoRA adapter id.
+//   best_of integer Generate best_of sequences and return the one with the
+//     highest token logprobs.
+//   decoder_input_details boolean Whether to return decoder input token
+//     logprobs and ids.
+//   details boolean Whether to return generation details.
+//   do_sample boolean Activate logits sampling.
+//   frequency_penalty number The parameter for frequency penalty. 1.0 means
+//     no penalty. Penalize new tokens based on their existing frequency in
+//     the text so far, decreasing the model's likelihood to repeat the same
+//     line verbatim.
+//   grammar One of the following:
+//     (#1) object: type* enum (json), value* A string that represents a JSON
+//       Schema. JSON Schema is a declarative language that allows to annotate
+//       JSON documents with types and descriptions.
+//     (#2) object: type* enum (regex), value* string
+//   max_new_tokens integer Maximum number of tokens to generate.
+//   repetition_penalty number The parameter for repetition penalty. 1.0 means
+//     no penalty. See this paper for more details.
+//   return_full_text boolean Whether to prepend the prompt to the generated
+//     text.
+//   seed integer Random sampling seed.
+//   stop string[] Stop generating tokens if a member of stop is generated.
+//   temperature number The value used to modulate the logits distribution.
+//   top_k integer The number of highest probability vocabulary tokens to keep
+//     for top-k filtering.
+//   top_n_tokens integer The number of highest probability vocabulary tokens
+//     to keep for top-n filtering.
+//   top_p number Top-p value for nucleus sampling.
+//   truncate integer Truncate input tokens to the given size.
+//   typical_p number Typical decoding mass. See Typical Decoding for Natural
+//     Language Generation for more information.
+//   watermark boolean Watermarking with A Watermark for Large Language Models.
+// stream
\ No newline at end of file
diff --git a/packages/langchain_huggingface/pubspec.yaml b/packages/langchain_huggingface/pubspec.yaml
index 2f29e62b..4d55f642 100644
--- a/packages/langchain_huggingface/pubspec.yaml
+++ b/packages/langchain_huggingface/pubspec.yaml
@@ -15,3 +15,6 @@ topics:
 
 environment:
   sdk: ">=3.4.0 <4.0.0"
+dependencies:
+  huggingface_client: ^1.5.0
+  langchain_core: ^0.3.6
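Usage sketch (not part of the patch): the snippet below shows how the options documented in types.dart could be passed per call to the new HuggingfaceInference wrapper, which maps them onto ApiQueryNLPTextGeneration in invoke(). The model id and API key are placeholders, and it assumes langchain_core's PromptValue.string constructor and the LLMResult.output field used by the mapper; treat it as an illustration of the API added above, not as tested code.

import 'package:langchain_core/prompts.dart';
import 'package:langchain_huggingface/langchain_huggingface.dart';

Future<void> main() async {
  final llm = HuggingfaceInference(
    model: 'gpt2', // placeholder model id
    apiKey: 'hf_xxx', // placeholder Hugging Face API key
  );

  // Per-call options override the wrapper's defaults inside invoke().
  final result = await llm.invoke(
    PromptValue.string('Write a haiku about Dart.'),
    options: const HuggingFaceOptions(
      temperature: 0.7, // softer sampling than the 1.0 fallback
      topK: 50, // sample only from the 50 most likely tokens
      topP: 0.9, // nucleus sampling mass
      maxNewTokens: 64, // cap the generated continuation
      returnFullText: false, // do not echo the prompt back
      waitForModel: true, // wait for a cold model instead of getting a 503
    ),
  );
  print(result.output);
}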