Sweep: Add xml documentation for LangChain.Providers.Abstractions package where it was missing #182

Closed
wants to merge 17 commits into from
Changes from all commits
17 commits
e7c001d
feat: Updated src/Providers/Abstractions/src/Chat/
sweep-ai[bot] Apr 1, 2024
954df98
feat: Updated src/Providers/Abstractions/src/Chat/
sweep-ai[bot] Apr 1, 2024
4f36fb2
feat: Updated src/Providers/Abstractions/src/Commo
sweep-ai[bot] Apr 1, 2024
639e400
feat: Updated src/Providers/Abstractions/src/Embed
sweep-ai[bot] Apr 1, 2024
a9e5b72
feat: Updated src/Providers/Abstractions/src/Image
sweep-ai[bot] Apr 1, 2024
c12acc3
feat: Updated src/Providers/Abstractions/src/TextT
sweep-ai[bot] Apr 1, 2024
7dca14d
feat: Updated src/Providers/Abstractions/src/Speec
sweep-ai[bot] Apr 1, 2024
bd8909f
feat: Updated src/Providers/Abstractions/src/TextT
sweep-ai[bot] Apr 1, 2024
f2578a7
feat: Updated src/Providers/Abstractions/src/Moder
sweep-ai[bot] Apr 1, 2024
aea0364
feat: Updated src/Providers/Abstractions/src/Chat/
sweep-ai[bot] Apr 1, 2024
433661f
feat: Updated src/Providers/Abstractions/src/Chat/
sweep-ai[bot] Apr 1, 2024
9fe2d17
Merge main into sweep/add_xml_documentation_for_langchainprovi
sweep-ai[bot] Apr 1, 2024
bf2b6e6
Merge main into sweep/add_xml_documentation_for_langchainprovi
sweep-ai[bot] Apr 2, 2024
89d546c
Merge main into sweep/add_xml_documentation_for_langchainprovi
sweep-ai[bot] Apr 3, 2024
c7dffc0
Merge main into sweep/add_xml_documentation_for_langchainprovi
sweep-ai[bot] Apr 3, 2024
dcaa33f
Merge main into sweep/add_xml_documentation_for_langchainprovi
sweep-ai[bot] Apr 3, 2024
02bc74f
Merge main into sweep/add_xml_documentation_for_langchainprovi
sweep-ai[bot] Apr 3, 2024
20 changes: 17 additions & 3 deletions src/Providers/Abstractions/src/Chat/ChatModel.cs
@@ -1,29 +1,42 @@
// ReSharper disable once CheckNamespace
namespace LangChain.Providers;

/// <summary>
/// Represents an abstract base class for chat models, providing common functionality and event handling.
/// </summary>
/// <param name="id">The unique identifier for the chat model.</param>
public abstract class ChatModel(string id) : Model<ChatSettings>(id), IChatModel<ChatRequest, ChatResponse, ChatSettings>
{
#region Events

/// <summary>
/// Gets or sets the context length for the chat model.
/// </summary>
public virtual int ContextLength { get; protected set; }

/// <inheritdoc cref="IChatModel.PartialResponseGenerated"/>
/// <summary>
/// Occurs when a partial response is generated.
/// </summary>
/// <inheritdoc/>
public event EventHandler<string>? PartialResponseGenerated;

protected void OnPartialResponseGenerated(string token)
{
PartialResponseGenerated?.Invoke(this, token);
}

/// <inheritdoc cref="IChatModel.CompletedResponseGenerated"/>
/// <inheritdoc/>
public event EventHandler<string>? CompletedResponseGenerated;

protected void OnCompletedResponseGenerated(string token)
{
CompletedResponseGenerated?.Invoke(this, token);
}

/// <inheritdoc cref="IChatModel.PromptSent"/>
/// <summary>
/// Occurs when a prompt is sent to the chat model.
/// </summary>
/// <inheritdoc/>
public event EventHandler<string>? PromptSent;


@@ -34,6 +47,7 @@ protected void OnPromptSent(string prompt)

#endregion

/// <inheritdoc/>
public abstract Task<ChatResponse> GenerateAsync(
ChatRequest request,
ChatSettings? settings = null,
29 changes: 23 additions & 6 deletions src/Providers/Abstractions/src/Chat/IChatModel.cs
@@ -6,34 +6,51 @@
public interface IChatModel : IModel<ChatSettings>
{
/// <summary>
/// Max input tokens for the model.
/// Gets the maximum number of input tokens the model can handle.
/// This property defines the upper limit of tokens that can be processed in a single request.
/// Implementations should ensure that requests do not exceed this limit.
/// </summary>
public int ContextLength { get; }

/// <summary>
/// Occurs when a partial response is generated.
/// </summary>
event EventHandler<string>? PartialResponseGenerated;

/// <summary>
/// Occurs when a completed response is generated.
/// </summary>
event EventHandler<string>? CompletedResponseGenerated;

/// <summary>
/// Occurs when a prompt is sent to the chat model.
/// </summary>
event EventHandler<string>? PromptSent;


/// <summary>
/// Occurs when token generated in streaming mode.
/// </summary>
event EventHandler<string>? PartialResponseGenerated;

Check failure on line 34 in src/Providers/Abstractions/src/Chat/IChatModel.cs (GitHub Actions / Build and test / Build, test and publish): The type 'IChatModel' already contains a definition for 'PartialResponseGenerated'

/// <summary>
/// Occurs when completed response generated.
/// </summary>
event EventHandler<string>? CompletedResponseGenerated;

Check failure on line 39 in src/Providers/Abstractions/src/Chat/IChatModel.cs (GitHub Actions / Build and test / Build, test and publish): The type 'IChatModel' already contains a definition for 'CompletedResponseGenerated'

/// <summary>
/// Occurs before prompt is sent to the model.
/// </summary>
event EventHandler<string>? PromptSent;

Check failure on line 44 in src/Providers/Abstractions/src/Chat/IChatModel.cs (GitHub Actions / Build and test / Build, test and publish): The type 'IChatModel' already contains a definition for 'PromptSent'
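
Note: the three build failures above are duplicate-member errors; the newly documented event declarations were added while the original declarations further down in the interface were kept. A minimal sketch of the de-duplicated event section that would resolve them (one declaration per event, carrying the new summary); this is a suggestion, not the committed diff:

// Inside IChatModel: each event declared once, with a single XML summary.
// Sketch only; not the code as committed in this PR.

/// <summary>
/// Occurs when a partial response is generated.
/// </summary>
event EventHandler<string>? PartialResponseGenerated;

/// <summary>
/// Occurs when a completed response is generated.
/// </summary>
event EventHandler<string>? CompletedResponseGenerated;

/// <summary>
/// Occurs before a prompt is sent to the chat model.
/// </summary>
event EventHandler<string>? PromptSent;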


/// <summary>
/// Run the LLM on the given prompt and input.
/// Asynchronously generates a chat response based on the provided request and settings.
/// </summary>
/// <param name="request"></param>
/// <param name="settings"></param>
/// <param name="cancellationToken"></param>
/// <returns></returns>
/// <param name="request">The chat request containing the prompt and any additional information required for generating a response.</param>
/// <param name="settings">Optional chat settings to customize the response generation process. If null, default settings are used.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation, containing the chat response.</returns>
public Task<ChatResponse> GenerateAsync(
ChatRequest request,
ChatSettings? settings = null,
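
For reference, a minimal consumer-side sketch of the members documented above (the three events plus GenerateAsync). It assumes only what this diff shows; building a ChatRequest is provider-specific and left to the caller, and the helper name is illustrative.

using System;
using System.Threading;
using System.Threading.Tasks;
using LangChain.Providers;

public static class ChatModelUsageSketch
{
    // Streams partial tokens to the console while awaiting the completed response.
    public static async Task<ChatResponse> RunAsync(
        IChatModel model,
        ChatRequest request,
        CancellationToken cancellationToken = default)
    {
        model.PromptSent += (_, prompt) => Console.WriteLine($"Prompt sent ({prompt.Length} chars)");
        model.PartialResponseGenerated += (_, token) => Console.Write(token);
        model.CompletedResponseGenerated += (_, text) => Console.WriteLine();

        // Null settings fall back to the model's defaults, as documented above.
        return await model.GenerateAsync(request, cancellationToken: cancellationToken);
    }
}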
4 changes: 4 additions & 0 deletions src/Providers/Abstractions/src/Common/IModel.cs
@@ -1,5 +1,9 @@
namespace LangChain.Providers;

/// <summary>
/// Represents the base interface for models in the LangChain framework. It defines essential properties and methods that all models must implement, including a unique identifier (Id) and usage tracking.
/// Implementing classes are expected to provide specific functionalities and data structures relevant to their domain while adhering to this common structure.
/// </summary>
/// <summary>
/// Defines a common model properties.
/// </summary>
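
For orientation, a small illustrative consumer of the interface described above. It assumes only the Id property that the summary names (and that it is exposed on the non-generic IModel); the usage-tracking members are not shown in this diff, so they are not used here.

using System;
using LangChain.Providers;

public static class ModelInfoSketch
{
    // Prints the unique identifier the IModel summary describes.
    public static void Describe(IModel model)
    {
        // Only Id is assumed here; usage-tracking members are out of scope for this sketch.
        Console.WriteLine($"Model id: {model.Id}");
    }
}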
13 changes: 7 additions & 6 deletions src/Providers/Abstractions/src/Embedding/IEmbeddingModel.cs
@@ -3,21 +3,22 @@ namespace LangChain.Providers;

/// <summary>
/// Interface for embedding models.
/// Provides functionality to create embeddings from input data using specific settings.
/// </summary>
public interface IEmbeddingModel : IModel<EmbeddingSettings>
{
/// <summary>
///
/// Gets the maximum length of input that the embedding model can process.
/// </summary>
public int MaximumInputLength { get; }

/// <summary>
///
/// Asynchronously creates embeddings based on the provided request and settings.
/// </summary>
/// <param name="request"></param>
/// <param name="settings"></param>
/// <param name="cancellationToken"></param>
/// <returns></returns>
/// <param name="request">The embedding request containing the input data for which embeddings are to be generated.</param>
/// <param name="settings">Optional embedding settings to customize the embedding generation process. If null, default settings are used.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation, resulting in an EmbeddingResponse object.</returns>
Task<EmbeddingResponse> CreateEmbeddingsAsync(
EmbeddingRequest request,
EmbeddingSettings? settings = null,
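
For reference, a minimal sketch of calling the documented method. An EmbeddingRequest is assumed to be built elsewhere, since its shape is not part of this diff; the helper name is illustrative.

using System;
using System.Threading;
using System.Threading.Tasks;
using LangChain.Providers;

public static class EmbeddingUsageSketch
{
    public static async Task<EmbeddingResponse> EmbedAsync(
        IEmbeddingModel model,
        EmbeddingRequest request,
        CancellationToken cancellationToken = default)
    {
        // MaximumInputLength is the documented upper bound on input size;
        // callers are expected to chunk or trim content beyond it.
        Console.WriteLine($"Max input length: {model.MaximumInputLength}");

        // Null settings fall back to the model's defaults, as documented above.
        return await model.CreateEmbeddingsAsync(request, cancellationToken: cancellationToken);
    }
}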
10 changes: 5 additions & 5 deletions src/Providers/Abstractions/src/ImageToText/IImageToTextModel.cs
@@ -2,17 +2,17 @@
namespace LangChain.Providers;

/// <summary>
/// Defines a large language model that can be used for image to text generation.
/// Defines a large language model interface for converting images to text. This interface outlines the contract for models that can interpret visual content and generate corresponding textual descriptions.
/// </summary>
public interface IImageToTextModel : IModel<ImageToTextSettings>
{
/// <summary>
/// Run the LLM on the given image.
/// </summary>
/// <param name="request"></param>
/// <param name="settings"></param>
/// <param name="cancellationToken"></param>
/// <returns></returns>
/// <param name="request">The image to text request containing the image and any additional information required for text generation.</param>
/// <param name="settings">Optional settings to customize the text generation process. If null, default settings are used.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation, resulting in an ImageToTextResponse object containing the generated text.</returns>
public Task<ImageToTextResponse> GenerateTextFromImageAsync(
ImageToTextRequest request,
ImageToTextSettings? settings = null,
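
For reference, a short sketch of the documented call. The ImageToTextRequest is assumed to be constructed elsewhere; its contents are not part of this diff.

using System.Threading;
using System.Threading.Tasks;
using LangChain.Providers;

public static class ImageToTextUsageSketch
{
    // Forwards a prebuilt image request to the model and returns the generated description.
    public static Task<ImageToTextResponse> DescribeImageAsync(
        IImageToTextModel model,
        ImageToTextRequest request,
        CancellationToken cancellationToken = default)
    {
        // Passing null settings uses the model's defaults, as documented above.
        return model.GenerateTextFromImageAsync(request, settings: null, cancellationToken: cancellationToken);
    }
}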
14 changes: 7 additions & 7 deletions src/Providers/Abstractions/src/Moderation/IModerationModel.cs
@@ -1,22 +1,22 @@
namespace LangChain.Providers;

/// <summary>
///
/// Defines the interface for models that perform content moderation. Implementations of this interface should provide mechanisms to assess if given input text violates content policies.
/// </summary>
public interface IModerationModel : IModel
{
/// <summary>
///
/// Gets the recommended size of text chunks for moderation checks. This size is optimal for the model to accurately assess content for policy violations.
/// </summary>
public int RecommendedModerationChunkSize { get; }

/// <summary>
/// Given a input text, returns true if the model classifies it as violating provider content policy.
/// Asynchronously checks if the given input text violates content policies.
/// </summary>
/// <param name="request"></param>
/// <param name="settings"></param>
/// <param name="cancellationToken"></param>
/// <returns></returns>
/// <param name="request">The moderation request containing the text to be checked.</param>
/// <param name="settings">Optional moderation settings to customize the check process. If null, default settings are used.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation, resulting in a ModerationResponse indicating if a violation was found.</returns>
public Task<ModerationResponse> CheckViolationAsync(
ModerationRequest request,
ModerationSettings? settings = null,
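
For reference, a minimal sketch of the documented moderation call; the ModerationRequest is assumed to be built by the caller, and the helper name is illustrative.

using System;
using System.Threading;
using System.Threading.Tasks;
using LangChain.Providers;

public static class ModerationUsageSketch
{
    public static Task<ModerationResponse> CheckAsync(
        IModerationModel model,
        ModerationRequest request,
        CancellationToken cancellationToken = default)
    {
        // RecommendedModerationChunkSize is the documented chunk length the model
        // assesses most reliably; long input should be split into pieces of roughly this size.
        Console.WriteLine($"Recommended chunk size: {model.RecommendedModerationChunkSize}");

        return model.CheckViolationAsync(request, cancellationToken: cancellationToken);
    }
}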
@@ -1,17 +1,17 @@
namespace LangChain.Providers;

/// <summary>
///
/// Defines the interface for models that convert speech to text. Implementations of this interface should provide mechanisms to transcribe spoken language into textual form.
/// </summary>
public interface ISpeechToTextModel : IModel<SpeechToTextSettings>
{
/// <summary>
/// Transcribes audio to text.
/// Asynchronously transcribes audio to text based on the provided request and settings.
/// </summary>
/// <param name="request"></param>
/// <param name="settings"></param>
/// <param name="cancellationToken"></param>
/// <returns></returns>
/// <param name="request">The speech to text request containing the audio data and any additional information required for transcription.</param>
/// <param name="settings">Optional settings to customize the transcription process. If null, default settings are used.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation, resulting in a SpeechToTextResponse object.</returns>
public Task<SpeechToTextResponse> TranscribeAsync(
SpeechToTextRequest request,
SpeechToTextSettings? settings = default,
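
For reference, a short sketch of the documented transcription call; the SpeechToTextRequest carrying the audio is assumed to be built elsewhere.

using System.Threading;
using System.Threading.Tasks;
using LangChain.Providers;

public static class SpeechToTextUsageSketch
{
    // Transcribes a prebuilt audio request; null settings fall back to the model's defaults.
    public static Task<SpeechToTextResponse> TranscribeAsync(
        ISpeechToTextModel model,
        SpeechToTextRequest request,
        CancellationToken cancellationToken = default)
    {
        return model.TranscribeAsync(request, cancellationToken: cancellationToken);
    }
}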
14 changes: 7 additions & 7 deletions src/Providers/Abstractions/src/TextToImage/ITextToImageModel.cs
@@ -2,22 +2,22 @@
namespace LangChain.Providers;

/// <summary>
///
/// Defines the interface for models that generate images based on textual input. Implementations of this interface should provide mechanisms to interpret text and produce corresponding visual content.
/// </summary>
public interface ITextToImageModel : IModel<TextToImageSettings>
{
/// <summary>
/// Occurs before prompt is sent to the model.
/// Occurs before a prompt is sent to the model, indicating the initiation of the image generation process based on the provided text.
/// </summary>
event EventHandler<string>? PromptSent;

/// <summary>
///
/// Asynchronously generates an image based on the provided text request and settings.
/// </summary>
/// <param name="request"></param>
/// <param name="settings"></param>
/// <param name="cancellationToken"></param>
/// <returns></returns>
/// <param name="request">The text to image request containing the text and any additional information required for image generation.</param>
/// <param name="settings">Optional settings to customize the image generation process. If null, default settings are used.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation, resulting in a TextToImageResponse object containing the generated image.</returns>
public Task<TextToImageResponse> GenerateImageAsync(
TextToImageRequest request,
TextToImageSettings? settings = default,
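
For reference, a minimal sketch of the documented image-generation call, including the PromptSent event declared above; the TextToImageRequest is assumed to be built by the caller.

using System;
using System.Threading;
using System.Threading.Tasks;
using LangChain.Providers;

public static class TextToImageUsageSketch
{
    public static Task<TextToImageResponse> RenderAsync(
        ITextToImageModel model,
        TextToImageRequest request,
        CancellationToken cancellationToken = default)
    {
        // PromptSent fires just before the text prompt reaches the provider.
        model.PromptSent += (_, prompt) => Console.WriteLine($"Prompt sent: {prompt}");

        return model.GenerateImageAsync(request, cancellationToken: cancellationToken);
    }
}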
@@ -2,17 +2,17 @@
namespace LangChain.Providers;

/// <summary>
///
/// Defines the interface for models that convert text to speech. Implementations of this interface should provide mechanisms to generate speech from textual input.
/// </summary>
public interface ITextToSpeechModel : IModel
{
/// <summary>
///
/// Asynchronously generates speech from the provided text request using specified settings.
/// </summary>
/// <param name="request"></param>
/// <param name="settings"></param>
/// <param name="cancellationToken"></param>
/// <returns></returns>
/// <param name="request">The text to speech request containing the text to be converted into speech.</param>
/// <param name="settings">Optional settings to customize the speech generation process. If null, default settings are used.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the operation.</param>
/// <returns>A task that represents the asynchronous operation, resulting in a TextToSpeechResponse object.</returns>
Task<TextToSpeechResponse> GenerateSpeechAsync(
TextToSpeechRequest request,
TextToSpeechSettings? settings = default,
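
For reference, a short sketch of the documented speech-generation call; the TextToSpeechRequest is assumed to be built elsewhere, and the helper name is illustrative.

using System.Threading;
using System.Threading.Tasks;
using LangChain.Providers;

public static class TextToSpeechUsageSketch
{
    // Converts a prebuilt text request into speech; null settings fall back to the model's defaults.
    public static Task<TextToSpeechResponse> SpeakAsync(
        ITextToSpeechModel model,
        TextToSpeechRequest request,
        CancellationToken cancellationToken = default)
    {
        return model.GenerateSpeechAsync(request, cancellationToken: cancellationToken);
    }
}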