You signed in with another tab or window. Reload to refresh your session.You signed out in another tab or window. Reload to refresh your session.You switched accounts on another tab or window. Reload to refresh your session.Dismiss alert
23:06:40-019558 INFO Loading "Mistral-7B-v0.3-Chinese-Chat"
23:06:40-025561 INFO TRANSFORMERS_PARAMS=
{ 'low_cpu_mem_usage': True,
'torch_dtype': torch.bfloat16,
'device_map': 'auto',
'quantization_config': BitsAndBytesConfig {
"_load_in_4bit": true,
"_load_in_8bit": false,
"bnb_4bit_compute_dtype": "bfloat16",
"bnb_4bit_quant_storage": "uint8",
"bnb_4bit_quant_type": "nf4",
"bnb_4bit_use_double_quant": false,
"llm_int8_enable_fp32_cpu_offload": true,
"llm_int8_has_fp16_weight": false,
"llm_int8_skip_modules": null,
"llm_int8_threshold": 6.0,
"load_in_4bit": true,
"load_in_8bit": false,
"quant_method": "bitsandbytes"
}
}
D:\ai\text-generation-webui-1.16\installer_files\env\Lib\site-packages\transformers\generation\configuration_utils.py:600: UserWarning: `do_sample` is set to `False`. However, `min_p` is set to `0.0` -- this flag is only used in sample-based generation modes. You should set `do_sample=True` or unset `min_p`.
warnings.warn(
Loading checkpoint shards: 100%|█████████████████████████████████████████████████████████| 3/3 [00:08<00:00, 2.83s/it]
23:06:50-884325 INFO Loaded "Mistral-7B-v0.3-Chinese-Chat" in 10.86 seconds.
23:06:50-886037 INFO LOADER: "Transformers"
23:06:50-887289 INFO TRUNCATION LENGTH: 32768
23:06:50-888036 INFO INSTRUCTION TEMPLATE: "Custom (obtained from model metadata)"
Traceback (most recent call last):
File "D:\ai\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\queueing.py", line 527, in process_events
response = await route_utils.call_process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\ai\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\route_utils.py", line 261, in call_process_api
output = await app.get_blocks().process_api(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\ai\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\blocks.py", line 1786, in process_api
result = await self.call_function(
^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\ai\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\blocks.py", line 1350, in call_function
prediction = await utils.async_iteration(iterator)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\ai\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\utils.py", line 583, in async_iteration
return await iterator.__anext__()
^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\ai\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\utils.py", line 576, in __anext__
return await anyio.to_thread.run_sync(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\ai\text-generation-webui-1.16\installer_files\env\Lib\site-packages\anyio\to_thread.py", line 56, in run_sync
return await get_async_backend().run_sync_in_worker_thread(
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\ai\text-generation-webui-1.16\installer_files\env\Lib\site-packages\anyio\_backends\_asyncio.py", line 2441, in run_sync_in_worker_thread
return await future
^^^^^^^^^^^^
File "D:\ai\text-generation-webui-1.16\installer_files\env\Lib\site-packages\anyio\_backends\_asyncio.py", line 943, in run
result = context.run(func, *args)
^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\ai\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\utils.py", line 559, in run_sync_iterator_async
return next(iterator)
^^^^^^^^^^^^^^
File "D:\ai\text-generation-webui-1.16\installer_files\env\Lib\site-packages\gradio\utils.py", line 742, in gen_wrapper
response = next(iterator)
^^^^^^^^^^^^^^
File "D:\ai\text-generation-webui-1.16\modules\chat.py", line 436, in generate_chat_reply_wrapper
for i, history in enumerate(generate_chat_reply(text, state, regenerate, _continue, loading_message=True, for_ui=True)):
File "D:\ai\text-generation-webui-1.16\modules\chat.py", line 403, in generate_chat_reply
for history in chatbot_wrapper(text, state, regenerate=regenerate, _continue=_continue, loading_message=loading_message, for_ui=for_ui):
File "D:\ai\text-generation-webui-1.16\modules\chat.py", line 305, in chatbot_wrapper
stopping_strings = get_stopping_strings(state)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\ai\text-generation-webui-1.16\modules\chat.py", line 265, in get_stopping_strings
prefix_bot, suffix_bot = get_generation_prompt(renderer, impersonate=False)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\ai\text-generation-webui-1.16\modules\chat.py", line 71, in get_generation_prompt
prompt = renderer(messages=messages)
^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "D:\ai\text-generation-webui-1.16\installer_files\env\Lib\site-packages\jinja2\environment.py", line 1304, in render
self.environment.handle_exception()
File "D:\ai\text-generation-webui-1.16\installer_files\env\Lib\site-packages\jinja2\environment.py", line 939, in handle_exception
raise rewrite_traceback_stack(source=source)
File "<template>", line 1, in top-level template code
jinja2.exceptions.UndefinedError: 'system_message' is undefined
System Info
11th Gen Intel(R) Core(TM) i7-11800H
NVIDIA RTX A2000 Laptop GPU
The text was updated successfully, but these errors were encountered:
Describe the bug
Can’t chat with it.
Is there an existing issue for this?
Reproduction
Click "Load" and then start chatting with it.
Screenshot
No response
Logs
System Info
The text was updated successfully, but these errors were encountered: