diff --git a/backend/tasks/views.py b/backend/tasks/views.py
index 5d251e2c..2130d390 100644
--- a/backend/tasks/views.py
+++ b/backend/tasks/views.py
@@ -1319,7 +1319,9 @@ def partial_update(self, request, pk=None):
             == "AcousticNormalisedTranscriptionEditing"
             else False
         )
-
+        interaction_llm = False
+        if "interaction_llm" in request.data:
+            interaction_llm = True
         # Base annotation update
         if annotation_obj.annotation_type == ANNOTATOR_ANNOTATION:
             if request.user not in task.annotation_users.all():
@@ -1346,16 +1348,28 @@ def partial_update(self, request, pk=None):
                         annotation_obj.task.project_id.project_type
                         == "InstructionDrivenChat"
                     ):
-                        output_result = get_llm_output(
-                            request.data["result"],
-                            annotation_obj.task,
-                            annotation_obj,
-                            annotation_obj.task.project_id.metadata_json,
-                        )
-                        # store the result of all checks as well
-                        annotation_obj.result.append(
-                            {"prompt": request.data["result"], "output": output_result}
-                        )
+                        if not interaction_llm:
+                            output_result = get_llm_output(
+                                request.data["result"],
+                                annotation_obj.task,
+                                annotation_obj,
+                                annotation_obj.task.project_id.metadata_json,
+                            )
+                            # store the result of all checks as well
+                            annotation_obj.result.append(
+                                {
+                                    "prompt": request.data["result"],
+                                    "output": output_result,
+                                }
+                            )
+                        # remove this check when you store the check results also
+                        if (
+                            interaction_llm
+                            and isinstance(request.data["result"], list)
+                            and isinstance(annotation_obj.result, list)
+                            and len(annotation_obj.result) < len(request.data["result"])
+                        ):
+                            annotation_obj.result = request.data["result"]
                         annotation_obj.meta_stats = (
                             compute_meta_stats_for_instruction_driven_chat(
                                 annotation_obj.result
@@ -1421,16 +1435,25 @@ def partial_update(self, request, pk=None):
                         annotation_obj.task.project_id.project_type
                         == "InstructionDrivenChat"
                     ):
-                        output_result = get_llm_output(
-                            request.data["result"],
-                            annotation_obj.task,
-                            annotation_obj,
-                            annotation_obj.task.project_id.metadata_json,
-                        )
-                        # store the result of all checks as well
-                        annotation_obj.result.append(
-                            {"prompt": request.data["result"], "output": output_result}
-                        )
+                        if not interaction_llm:
+                            output_result = get_llm_output(
+                                request.data["result"],
+                                annotation_obj.task,
+                                annotation_obj,
+                                annotation_obj.task.project_id.metadata_json,
+                            )
+                            # store the result of all checks as well
+                            annotation_obj.result.append(
+                                {"prompt": request.data["result"], "output": output_result}
+                            )
+                        # remove this check when you store the check results also
+                        if (
+                            interaction_llm
+                            and isinstance(request.data["result"], list)
+                            and isinstance(annotation_obj.result, list)
+                            and len(annotation_obj.result) < len(request.data["result"])
+                        ):
+                            annotation_obj.result = request.data["result"]
                         is_IDC = True
                         request.data["result"] = annotation_obj.result
                         request.data[
@@ -1500,16 +1523,27 @@ def partial_update(self, request, pk=None):
                         annotation_obj.task.project_id.project_type
                         == "InstructionDrivenChat"
                     ):
-                        output_result = get_llm_output(
-                            request.data["result"],
-                            annotation_obj.task,
-                            annotation_obj,
-                            annotation_obj.task.project_id.metadata_json,
-                        )
-                        # store the result of all checks as well
-                        annotation_obj.result.append(
-                            {"prompt": request.data["result"], "output": output_result}
-                        )
+                        if not interaction_llm:
+                            output_result = get_llm_output(
+                                request.data["result"],
+                                annotation_obj.task,
+                                annotation_obj,
+                                annotation_obj.task.project_id.metadata_json,
+                            )
+                            # store the result of all checks as well
+                            annotation_obj.result.append(
+                                {
+                                    "prompt": request.data["result"],
+                                    "output": output_result,
+                                }
+                            )
+                        if (
+                            interaction_llm
+                            and isinstance(request.data["result"], list)
+                            and isinstance(annotation_obj.result, list)
+                            and len(annotation_obj.result) < len(request.data["result"])
+                        ):
+                            annotation_obj.result = request.data["result"]
                         annotation_obj.meta_stats = (
                             compute_meta_stats_for_instruction_driven_chat(
                                 annotation_obj.result
@@ -1614,16 +1648,24 @@ def partial_update(self, request, pk=None):
                         annotation_obj.task.project_id.project_type
                         == "InstructionDrivenChat"
                     ):
-                        output_result = get_llm_output(
-                            request.data["result"],
-                            annotation_obj.task,
-                            annotation_obj,
-                            annotation_obj.task.project_id.metadata_json,
-                        )
-                        # store the result of all checks as well
-                        annotation_obj.result.append(
-                            {"prompt": request.data["result"], "output": output_result}
-                        )
+                        if not interaction_llm:
+                            output_result = get_llm_output(
+                                request.data["result"],
+                                annotation_obj.task,
+                                annotation_obj,
+                                annotation_obj.task.project_id.metadata_json,
+                            )
+                            # store the result of all checks as well
+                            annotation_obj.result.append(
+                                {"prompt": request.data["result"], "output": output_result}
+                            )
+                        if (
+                            interaction_llm
+                            and isinstance(request.data["result"], list)
+                            and isinstance(annotation_obj.result, list)
+                            and len(annotation_obj.result) < len(request.data["result"])
+                        ):
+                            annotation_obj.result = request.data["result"]
                         is_IDC = True
                         request.data["result"] = annotation_obj.result
                         request.data[
@@ -1720,16 +1762,27 @@ def partial_update(self, request, pk=None):
                         annotation_obj.task.project_id.project_type
                         == "InstructionDrivenChat"
                     ):
-                        output_result = get_llm_output(
-                            request.data["result"],
-                            annotation_obj.task,
-                            annotation_obj,
-                            annotation_obj.task.project_id.metadata_json,
-                        )
-                        # store the result of all checks as well
-                        annotation_obj.result.append(
-                            {"prompt": request.data["result"], "output": output_result}
-                        )
+                        if not interaction_llm:
+                            output_result = get_llm_output(
+                                request.data["result"],
+                                annotation_obj.task,
+                                annotation_obj,
+                                annotation_obj.task.project_id.metadata_json,
+                            )
+                            # store the result of all checks as well
+                            annotation_obj.result.append(
+                                {
+                                    "prompt": request.data["result"],
+                                    "output": output_result,
+                                }
+                            )
+                        if (
+                            interaction_llm
+                            and isinstance(request.data["result"], list)
+                            and isinstance(annotation_obj.result, list)
+                            and len(annotation_obj.result) < len(request.data["result"])
+                        ):
+                            annotation_obj.result = request.data["result"]
                         annotation_obj.meta_stats = (
                             compute_meta_stats_for_instruction_driven_chat(
                                 annotation_obj.result
@@ -1825,16 +1878,24 @@ def partial_update(self, request, pk=None):
                         annotation_obj.task.project_id.project_type
                         == "InstructionDrivenChat"
                     ):
-                        output_result = get_llm_output(
-                            request.data["result"],
-                            annotation_obj.task,
-                            annotation_obj,
-                            annotation_obj.task.project_id.metadata_json,
-                        )
-                        # store the result of all checks as well
-                        annotation_obj.result.append(
-                            {"prompt": request.data["result"], "output": output_result}
-                        )
+                        if not interaction_llm:
+                            output_result = get_llm_output(
+                                request.data["result"],
+                                annotation_obj.task,
+                                annotation_obj,
+                                annotation_obj.task.project_id.metadata_json,
+                            )
+                            # store the result of all checks as well
+                            annotation_obj.result.append(
+                                {"prompt": request.data["result"], "output": output_result}
+                            )
+                        if (
+                            interaction_llm
+                            and isinstance(request.data["result"], list)
+                            and isinstance(annotation_obj.result, list)
+                            and len(annotation_obj.result) < len(request.data["result"])
+                        ):
+                            annotation_obj.result = request.data["result"]
                         is_IDC = True
                         request.data["result"] = annotation_obj.result
                         request.data[
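Reviewer note: the same gating pattern is pasted into all six `InstructionDrivenChat` branches above. The sketch below distills it into one standalone function so the intent is easier to check; `apply_idc_result` is a hypothetical helper name not present in the diff, while `get_llm_output`, the `interaction_llm` flag, and the field names are taken verbatim from the change.

```python
def apply_idc_result(annotation_obj, request_data, interaction_llm):
    """Hypothetical distillation of the gating logic added in this diff.

    Assumes get_llm_output is importable from this module, as in views.py.
    """
    result = request_data["result"]
    if not interaction_llm:
        # Server-driven path: generate the model output here and append
        # a single {prompt, output} turn to the stored conversation.
        output_result = get_llm_output(
            result,
            annotation_obj.task,
            annotation_obj,
            annotation_obj.task.project_id.metadata_json,
        )
        annotation_obj.result.append({"prompt": result, "output": output_result})
    elif (
        isinstance(result, list)
        and isinstance(annotation_obj.result, list)
        and len(annotation_obj.result) < len(result)
    ):
        # Client-driven path: the frontend already ran the LLM interaction
        # and sends the whole conversation. It is accepted only when it is
        # strictly longer than the stored list, so a stale or truncated
        # payload cannot overwrite newer server-side state.
        annotation_obj.result = result
```

One design consequence worth noting: because the replacement is wholesale rather than an append, a client-supplied list that is longer but otherwise diverges from the stored history silently wins, which is presumably why two of the hunks carry the "remove this check when you store the check results also" comment.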