fix: proper VRAM cleanup on model unload + CUDA alloc config
- Force gc.collect() before torch.cuda.empty_cache() so all model references
  are released before the CUDA cache is emptied
- Set PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True in the container

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
@@ -57,6 +57,9 @@ COPY --from=builder /usr/local/lib/python3.12/dist-packages/llama_cpp_python* /u
# Application source
COPY llmux/ /app/llmux/

WORKDIR /app

# expandable_segments lets the CUDA caching allocator grow segments instead of
# fragmenting VRAM when models are repeatedly loaded and unloaded (model swap).
ENV PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True

# Run the server.
# EXPOSE is documentation of the service port; exec-form CMD keeps uvicorn as
# PID 1 so it receives SIGTERM directly on `docker stop`.
EXPOSE 8081
CMD ["uvicorn", "llmux.main:app", "--host", "0.0.0.0", "--port", "8081"]
Reference in New Issue
Block a user