fix: streaming response bug + GPT-OSS-20B-Uncensored MXFP4 GGUF

- Fix async generator streaming: _stream_generate now yields directly
  instead of returning a nested _iter(); the route handler awaits
  generate() and passes the resulting async generator to StreamingResponse
- Replace aoxo/gpt-oss-20b-uncensored (no quant, OOM) with
  HauhauCS MXFP4 GGUF via llama-cpp backend

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
tlg
2026-04-06 22:21:22 +02:00
parent 61308703dc
commit 06923d51b4
3 changed files with 19 additions and 23 deletions

View File

@@ -133,14 +133,11 @@ class LlamaCppBackend(BaseBackend):
loop = asyncio.get_event_loop()
stream = await loop.run_in_executor(None, _run)
async def _iter():
for chunk in stream:
chunk["model"] = model_id
yield f"data: {json.dumps(chunk)}\n\n"
yield "data: [DONE]\n\n"
return _iter()
def _create_think_handler(llm, enable_thinking: bool):
"""Create a chat handler with thinking enabled or disabled via Jinja template."""

View File

@@ -112,9 +112,8 @@ class TransformersLLMBackend(BaseBackend):
chat_id = f"chatcmpl-{uuid.uuid4().hex[:12]}"
created = int(time.time())
async def _iter():
loop = asyncio.get_event_loop()
while True:
token = await loop.run_in_executor(None, lambda: next(streamer, None))
if token is None:
@@ -126,8 +125,6 @@ class TransformersLLMBackend(BaseBackend):
yield f"data: {json.dumps(chunk)}\n\n"
thread.join()
return _iter()
# Physical model config injection
_physical_models: dict[str, PhysicalModel] = {}

View File

@@ -37,10 +37,12 @@ def create_chat_router(registry, vram_manager, backends, require_api_key):
messages = body.get("messages", [])
stream = body.get("stream", False)
tools = body.get("tools")
result = await backend.generate(model_id=physical_id, messages=messages, params=params, stream=stream, tools=tools)
if stream:
return StreamingResponse(result, media_type="text/event-stream")
# generate() is async def that returns an async generator
stream_iter = await backend.generate(model_id=physical_id, messages=messages, params=params, stream=True, tools=tools)
return StreamingResponse(stream_iter, media_type="text/event-stream")
result = await backend.generate(model_id=physical_id, messages=messages, params=params, stream=False, tools=tools)
return result
return router