feat(sdk): allow input text into the python sdk (#2738)
GitOrigin-RevId: 9c36a4ad2fa7bd831920145ecf1c29b16765cc11
jhazenaai committed Nov 20, 2023
1 parent 3f69d0d commit 770c155
Showing 4 changed files with 263 additions and 7 deletions.
31 changes: 31 additions & 0 deletions README.md
@@ -266,6 +266,37 @@ print(result.response)

</details>


<details>
<summary>Use LeMUR with Input Text</summary>

```python
import assemblyai as aai

transcriber = aai.Transcriber()
config = aai.TranscriptionConfig(
speaker_labels=True,
)
transcript = transcriber.transcribe("https://example.org/customer.mp3", config=config)

# Example converting speaker label utterances into LeMUR input text
text = ""

for utt in transcript.utterances:
text += f"Speaker {utt.speaker}:\n{utt.text}\n"

result = aai.Lemur().task(
"You are a helpful coach. Provide an analysis of the transcript "
"and offer areas to improve with exact quotes. Include no preamble. "
"Start with an overall summary then get into the examples with feedback.",
input_text=text
)

print(result.response)
```

</details>

<details>
<summary>Delete data previously sent to LeMUR</summary>

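The README example above covers `task`; the same `input_text` keyword is wired into `question`, `summarize`, and `action_items` by the SDK changes below. A minimal sketch of question answering driven purely by input text, mirroring the new unit tests (the API key, question, and text are placeholders):

```python
import assemblyai as aai

aai.settings.api_key = "YOUR_API_KEY"  # placeholder

question = aai.LemurQuestion(
    question="Which cars do the callers want to buy?",
    context="Callers are interested in buying cars",
    answer_options=["Toyota", "Honda", "Ford", "Chevrolet"],
)

# No transcript sources are needed when input_text is supplied.
result = aai.Lemur().question(
    question,
    input_text="Speaker A: I'm looking for a Ford.\nSpeaker B: Great, let's talk options.",
)

for answer in result.response:
    print(answer.question, "->", answer.answer)
```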
24 changes: 22 additions & 2 deletions assemblyai/lemur.py
@@ -14,7 +14,11 @@ def __init__(
) -> None:
self._client = client

self._sources = [types.LemurSourceRequest.from_lemur_source(s) for s in sources]
self._sources = (
[types.LemurSourceRequest.from_lemur_source(s) for s in sources]
if sources is not None
else []
)

def question(
self,
@@ -24,6 +28,7 @@ def question(
final_model: Optional[types.LemurModel],
max_output_size: Optional[int],
temperature: Optional[float],
input_text: Optional[str],
) -> types.LemurQuestionResponse:
response = api.lemur_question(
client=self._client.http_client,
@@ -34,6 +39,7 @@ def question(
final_model=final_model,
max_output_size=max_output_size,
temperature=temperature,
input_text=input_text,
),
http_timeout=timeout,
)
@@ -48,6 +54,7 @@ def summarize(
max_output_size: Optional[int],
timeout: Optional[float],
temperature: Optional[float],
input_text: Optional[str],
) -> types.LemurSummaryResponse:
response = api.lemur_summarize(
client=self._client.http_client,
@@ -58,6 +65,7 @@ def summarize(
final_model=final_model,
max_output_size=max_output_size,
temperature=temperature,
input_text=input_text,
),
http_timeout=timeout,
)
@@ -72,6 +80,7 @@ def action_items(
max_output_size: Optional[int],
timeout: Optional[float],
temperature: Optional[float],
input_text: Optional[str],
) -> types.LemurActionItemsResponse:
response = api.lemur_action_items(
client=self._client.http_client,
@@ -82,6 +91,7 @@ def action_items(
final_model=final_model,
max_output_size=max_output_size,
temperature=temperature,
input_text=input_text,
),
http_timeout=timeout,
)
@@ -95,6 +105,7 @@ def task(
max_output_size: Optional[int],
timeout: Optional[float],
temperature: Optional[float],
input_text: Optional[str],
):
response = api.lemur_task(
client=self._client.http_client,
@@ -104,6 +115,7 @@ def task(
final_model=final_model,
max_output_size=max_output_size,
temperature=temperature,
input_text=input_text,
),
http_timeout=timeout,
)
@@ -121,7 +133,7 @@ class Lemur:

def __init__(
self,
sources: List[types.LemurSource],
sources: Optional[List[types.LemurSource]] = None,
client: Optional[_client.Client] = None,
) -> None:
"""
@@ -147,6 +159,7 @@ def question(
max_output_size: Optional[int] = None,
timeout: Optional[float] = None,
temperature: Optional[float] = None,
input_text: Optional[str] = None,
) -> types.LemurQuestionResponse:
"""
Question & Answer allows you to ask free form questions about one or many transcripts.
@@ -178,6 +191,7 @@ def question(
max_output_size=max_output_size,
timeout=timeout,
temperature=temperature,
input_text=input_text,
)

def summarize(
@@ -188,6 +202,7 @@ def summarize(
max_output_size: Optional[int] = None,
timeout: Optional[float] = None,
temperature: Optional[float] = None,
input_text: Optional[str] = None,
) -> types.LemurSummaryResponse:
"""
Summary allows you to distill a piece of audio into a few impactful sentences.
@@ -214,6 +229,7 @@ def summarize(
max_output_size=max_output_size,
timeout=timeout,
temperature=temperature,
input_text=input_text,
)

def action_items(
@@ -224,6 +240,7 @@ def action_items(
max_output_size: Optional[int] = None,
timeout: Optional[float] = None,
temperature: Optional[float] = None,
input_text: Optional[str] = None,
) -> types.LemurActionItemsResponse:
"""
Action Items allows you to generate action items from one or many transcripts.
@@ -251,6 +268,7 @@ def action_items(
max_output_size=max_output_size,
timeout=timeout,
temperature=temperature,
input_text=input_text,
)

def task(
@@ -260,6 +278,7 @@ def task(
max_output_size: Optional[int] = None,
timeout: Optional[float] = None,
temperature: Optional[float] = None,
input_text: Optional[str] = None,
) -> types.LemurTaskResponse:
"""
Task feature allows you to submit a custom prompt to the model.
@@ -282,6 +301,7 @@ def task(
max_output_size=max_output_size,
timeout=timeout,
temperature=temperature,
input_text=input_text,
)

@classmethod
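Since `sources` now defaults to `None` in the updated constructor above, a `Lemur` instance can be created without any transcript sources and driven entirely by `input_text`. A minimal sketch of `summarize` under that assumption (the API key, context, answer format, and text are placeholders):

```python
import assemblyai as aai

aai.settings.api_key = "YOUR_API_KEY"  # placeholder

# No sources passed; the constructor now falls back to an empty list internally.
lemur = aai.Lemur()

result = lemur.summarize(
    context="Callers asking for cars",
    answer_format="TLDR",
    input_text="Speaker A: I'd like to trade in my sedan.\nSpeaker B: We can help with that.",
)
print(result.response)
```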
1 change: 1 addition & 0 deletions assemblyai/types.py
@@ -1773,6 +1773,7 @@ class BaseLemurRequest(BaseModel):
final_model: Optional[LemurModel]
max_output_size: Optional[int]
temperature: Optional[float]
input_text: Optional[str]


class LemurTaskRequest(BaseLemurRequest):
214 changes: 209 additions & 5 deletions tests/unit/test_lemur.py
@@ -14,7 +14,7 @@
aai.settings.api_key = "test"


def test_lemur_single_question_succeeds(httpx_mock: HTTPXMock):
def test_lemur_single_question_succeeds_transcript(httpx_mock: HTTPXMock):
"""
Tests whether asking a single question succeeds.
"""
@@ -64,7 +64,54 @@ def test_lemur_single_question_succeeds(httpx_mock: HTTPXMock):
assert len(httpx_mock.get_requests()) == 1


def test_lemur_multiple_question_succeeds(httpx_mock: HTTPXMock):
def test_lemur_single_question_succeeds_input_text(httpx_mock: HTTPXMock):
"""
Tests whether asking a single question succeeds with input text.
"""

# create a mock response of a LemurQuestionResponse
mock_lemur_answer = factories.generate_dict_factory(
factories.LemurQuestionResponse
)()

# we only want to mock one answer
mock_lemur_answer["response"] = [mock_lemur_answer["response"][0]]

# mock the specific endpoints
httpx_mock.add_response(
url=f"{aai.settings.base_url}{ENDPOINT_LEMUR}/question-answer",
status_code=httpx.codes.OK,
method="POST",
json=mock_lemur_answer,
)

# prepare the question to be asked
question = aai.LemurQuestion(
question="Which cars do the callers want to buy?",
context="Callers are interested in buying cars",
answer_options=["Toyota", "Honda", "Ford", "Chevrolet"],
)
# test input_text input
# mimic the usage of the SDK
lemur = aai.Lemur()
result = lemur.question(
question, input_text="This transcript is a test transcript."
)

# check whether answer is not a list
assert isinstance(result, aai.LemurQuestionResponse)

answers = result.response

# check the response
assert answers[0].question == mock_lemur_answer["response"][0]["question"]
assert answers[0].answer == mock_lemur_answer["response"][0]["answer"]

# check whether we mocked everything
assert len(httpx_mock.get_requests()) == 1


def test_lemur_multiple_question_succeeds_transcript(httpx_mock: HTTPXMock):
"""
Tests whether asking multiple questions succeeds.
"""
@@ -117,6 +164,59 @@ def test_lemur_multiple_question_succeeds(httpx_mock: HTTPXMock):
assert len(httpx_mock.get_requests()) == 1


def test_lemur_multiple_question_succeeds_input_text(httpx_mock: HTTPXMock):
"""
Tests whether asking multiple questions succeeds with input text.
"""

# create a mock response of a LemurQuestionResponse
mock_lemur_answer = factories.generate_dict_factory(
factories.LemurQuestionResponse
)()

# prepare the questions to be asked
questions = [
aai.LemurQuestion(
question="Which cars do the callers want to buy?",
),
aai.LemurQuestion(
question="What price range are the callers looking for?",
),
]

# update the mock questions with the questions
mock_lemur_answer["response"][0]["question"] = questions[0].question
mock_lemur_answer["response"][1]["question"] = questions[1].question

# mock the specific endpoints
httpx_mock.add_response(
url=f"{aai.settings.base_url}{ENDPOINT_LEMUR}/question-answer",
status_code=httpx.codes.OK,
method="POST",
json=mock_lemur_answer,
)

# test input_text input
# mimic the usage of the SDK
lemur = aai.Lemur()
result = lemur.question(
questions, input_text="This transcript is a test transcript."
)
assert isinstance(result, aai.LemurQuestionResponse)

answers = result.response
# check whether answers is a list
assert isinstance(answers, list)

# check the response
for idx, answer in enumerate(answers):
assert answer.question == mock_lemur_answer["response"][idx]["question"]
assert answer.answer == mock_lemur_answer["response"][idx]["answer"]

# check whether we mocked everything
assert len(httpx_mock.get_requests()) == 1


def test_lemur_question_fails(httpx_mock: HTTPXMock):
"""
Tests whether asking a question fails.
@@ -149,7 +249,7 @@ def test_lemur_question_fails(httpx_mock: HTTPXMock):
assert len(httpx_mock.get_requests()) == 1


def test_lemur_summarize_succeeds(httpx_mock: HTTPXMock):
def test_lemur_summarize_succeeds_transcript(httpx_mock: HTTPXMock):
"""
Tests whether summarizing a transcript via LeMUR succeeds.
"""
@@ -184,6 +284,41 @@ def test_lemur_summarize_succeeds(httpx_mock: HTTPXMock):
assert len(httpx_mock.get_requests()) == 1


def test_lemur_summarize_succeeds_input_text(httpx_mock: HTTPXMock):
"""
Tests whether summarizing a transcript via LeMUR succeeds with input text.
"""

# create a mock response of a LemurSummaryResponse
mock_lemur_summary = factories.generate_dict_factory(
factories.LemurSummaryResponse
)()

# mock the specific endpoints
httpx_mock.add_response(
url=f"{aai.settings.base_url}{ENDPOINT_LEMUR}/summary",
status_code=httpx.codes.OK,
method="POST",
json=mock_lemur_summary,
)

# test input_text input
lemur = aai.Lemur()
result = lemur.summarize(
context="Callers asking for cars", answer_format="TLDR", input_text="Test test"
)

assert isinstance(result, aai.LemurSummaryResponse)

summary = result.response

# check the response
assert summary == mock_lemur_summary["response"]

# check whether we mocked everything
assert len(httpx_mock.get_requests()) == 1


def test_lemur_summarize_fails(httpx_mock: HTTPXMock):
"""
Tests whether summarizing a transcript via LeMUR fails.
@@ -209,7 +344,7 @@ def test_lemur_summarize_fails(httpx_mock: HTTPXMock):
assert len(httpx_mock.get_requests()) == 1


def test_lemur_action_items_succeeds(httpx_mock: HTTPXMock):
def test_lemur_action_items_succeeds_transcript(httpx_mock: HTTPXMock):
"""
Tests whether generating action items for a transcript via LeMUR succeeds.
"""
@@ -247,6 +382,43 @@ def test_lemur_action_items_succeeds(httpx_mock: HTTPXMock):
assert len(httpx_mock.get_requests()) == 1


def test_lemur_action_items_succeeds_input_text(httpx_mock: HTTPXMock):
"""
Tests whether generating action items via LeMUR succeeds with input text.
"""

# create a mock response of a LemurActionItemsResponse
mock_lemur_action_items = factories.generate_dict_factory(
factories.LemurActionItemsResponse
)()

# mock the specific endpoints
httpx_mock.add_response(
url=f"{aai.settings.base_url}{ENDPOINT_LEMUR}/action-items",
status_code=httpx.codes.OK,
method="POST",
json=mock_lemur_action_items,
)

# test input_text input
lemur = aai.Lemur()
result = lemur.action_items(
context="Customers asking for help with resolving their problem",
answer_format="Three bullet points",
input_text="Test test",
)

assert isinstance(result, aai.LemurActionItemsResponse)

action_items = result.response

# check the response
assert action_items == mock_lemur_action_items["response"]

# check whether we mocked everything
assert len(httpx_mock.get_requests()) == 1


def test_lemur_action_items_fails(httpx_mock: HTTPXMock):
"""
Tests whether generating action items for a transcript via LeMUR fails.
@@ -275,7 +447,7 @@ def test_lemur_action_items_fails(httpx_mock: HTTPXMock):
assert len(httpx_mock.get_requests()) == 1


def test_lemur_task_succeeds(httpx_mock: HTTPXMock):
def test_lemur_task_succeeds_transcript(httpx_mock: HTTPXMock):
"""
Tests whether creating a task request succeeds.
"""
@@ -310,6 +482,38 @@ def test_lemur_task_succeeds(httpx_mock: HTTPXMock):
assert len(httpx_mock.get_requests()) == 1


def test_lemur_task_succeeds_input_text(httpx_mock: HTTPXMock):
"""
Tests whether creating a task request succeeds with input text.
"""

# create a mock response of a LemurTaskResponse
mock_lemur_task_response = factories.generate_dict_factory(
factories.LemurTaskResponse
)()

# mock the specific endpoints
httpx_mock.add_response(
url=f"{aai.settings.base_url}{ENDPOINT_LEMUR}/task",
status_code=httpx.codes.OK,
method="POST",
json=mock_lemur_task_response,
)
# test input_text input
lemur = aai.Lemur()
result = lemur.task(
prompt="Create action items of the meeting", input_text="Test test"
)

# check the response
assert isinstance(result, aai.LemurTaskResponse)

assert result.response == mock_lemur_task_response["response"]

# check whether we mocked everything
assert len(httpx_mock.get_requests()) == 1


def test_lemur_ask_coach_fails(httpx_mock: HTTPXMock):
"""
Tests whether creating a task request fails.

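The unit tests above mock the LeMUR endpoints; against the live API, the equivalent action-items call with input text would look roughly like this (the API key, context, and text are placeholders):

```python
import assemblyai as aai

aai.settings.api_key = "YOUR_API_KEY"  # placeholder

result = aai.Lemur().action_items(
    context="Customers asking for help with resolving their problem",
    answer_format="Three bullet points",
    input_text="Agent: How can I help?\nCustomer: My order never arrived.",
)
print(result.response)
```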