- Enhanced `ARCHITECTURE.md` with details on LLM models for work (Llama 3.1 70B Q4) and family agents (Phi-3 Mini 3.8B Q4).
- Introduced new documents:
  - `ASR_EVALUATION.md` for ASR engine evaluation and selection.
  - `HARDWARE.md` outlining hardware requirements and purchase plans.
  - `IMPLEMENTATION_GUIDE.md` for Milestone 2 implementation steps.
  - `LLM_CAPACITY.md` assessing VRAM and context window limits.
  - `LLM_MODEL_SURVEY.md` surveying open-weight LLM models.
  - `LLM_USAGE_AND_COSTS.md` detailing LLM usage and operational costs.
  - `MCP_ARCHITECTURE.md` describing the Model Context Protocol architecture.
  - `MCP_IMPLEMENTATION_SUMMARY.md` summarizing MCP implementation status.

These updates provide comprehensive guidance for the next phases of development and ensure clarity in project documentation.
192 lines · 6.1 KiB · Python
"""
|
|
MCP-LLM Adapter - Converts between LLM function calls and MCP tool calls.
|
|
"""
|
|
|
|
import json
import logging
from typing import Any, Dict, List, Optional

import requests

logger = logging.getLogger(__name__)

class MCPAdapter:
|
|
"""
|
|
Adapter that converts LLM function calls to MCP tool calls and back.
|
|
|
|
Supports OpenAI-compatible function calling format.
|
|
"""
|
|
|
|
def __init__(self, mcp_server_url: str = "http://localhost:8000/mcp"):
|
|
"""
|
|
Initialize MCP adapter.
|
|
|
|
Args:
|
|
mcp_server_url: URL of the MCP server endpoint
|
|
"""
|
|
self.mcp_server_url = mcp_server_url
|
|
self._tools_cache: Optional[List[Dict[str, Any]]] = None
|
|
self._request_id = 0
|
|
|
|
def _next_request_id(self) -> int:
|
|
"""Get next request ID for JSON-RPC."""
|
|
self._request_id += 1
|
|
return self._request_id
|
|
|
|
def _make_mcp_request(self, method: str, params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
|
|
"""
|
|
Make a JSON-RPC request to MCP server.
|
|
|
|
Args:
|
|
method: JSON-RPC method name
|
|
params: Method parameters
|
|
|
|
Returns:
|
|
JSON-RPC response
|
|
"""
|
|
request = {
|
|
"jsonrpc": "2.0",
|
|
"method": method,
|
|
"id": self._next_request_id()
|
|
}
|
|
|
|
if params:
|
|
request["params"] = params
|
|
|
|
try:
|
|
response = requests.post(
|
|
self.mcp_server_url,
|
|
json=request,
|
|
headers={"Content-Type": "application/json"},
|
|
timeout=30
|
|
)
|
|
response.raise_for_status()
|
|
return response.json()
|
|
except requests.exceptions.RequestException as e:
|
|
logger.error(f"MCP request failed: {e}")
|
|
raise
|
|
|
|
def discover_tools(self, force_refresh: bool = False) -> List[Dict[str, Any]]:
|
|
"""
|
|
Discover available tools from MCP server.
|
|
|
|
Args:
|
|
force_refresh: Force refresh of cached tools
|
|
|
|
Returns:
|
|
List of tools in OpenAI function format
|
|
"""
|
|
if self._tools_cache is None or force_refresh:
|
|
logger.info("Discovering tools from MCP server...")
|
|
response = self._make_mcp_request("tools/list")
|
|
|
|
# Check for actual errors (error field exists and is not None)
|
|
if "error" in response and response["error"] is not None:
|
|
error = response["error"]
|
|
error_msg = f"MCP error: {error.get('message', 'Unknown error')}"
|
|
logger.error(error_msg)
|
|
raise Exception(error_msg)
|
|
|
|
mcp_tools = response.get("result", {}).get("tools", [])
|
|
|
|
# Convert MCP tool format to OpenAI function format
|
|
self._tools_cache = []
|
|
for tool in mcp_tools:
|
|
openai_tool = {
|
|
"type": "function",
|
|
"function": {
|
|
"name": tool["name"],
|
|
"description": tool["description"],
|
|
"parameters": tool.get("inputSchema", {})
|
|
}
|
|
}
|
|
self._tools_cache.append(openai_tool)
|
|
|
|
logger.info(f"Discovered {len(self._tools_cache)} tools")
|
|
|
|
return self._tools_cache
|
|
|
|
def call_tool(self, function_call: Dict[str, Any]) -> str:
|
|
"""
|
|
Call a tool via MCP server.
|
|
|
|
Args:
|
|
function_call: LLM function call in OpenAI format
|
|
{
|
|
"name": "tool_name",
|
|
"arguments": {...}
|
|
}
|
|
|
|
Returns:
|
|
Tool result as string (for LLM to process)
|
|
"""
|
|
tool_name = function_call.get("name")
|
|
arguments = function_call.get("arguments", {})
|
|
|
|
if not tool_name:
|
|
raise ValueError("Function call missing 'name' field")
|
|
|
|
logger.info(f"Calling tool: {tool_name} with arguments: {arguments}")
|
|
|
|
# Make MCP call
|
|
response = self._make_mcp_request(
|
|
"tools/call",
|
|
params={
|
|
"name": tool_name,
|
|
"arguments": arguments
|
|
}
|
|
)
|
|
|
|
# Handle errors (check if error exists and is not None)
|
|
if "error" in response and response["error"] is not None:
|
|
error = response["error"]
|
|
error_msg = f"Tool '{tool_name}' failed: {error.get('message', 'Unknown error')}"
|
|
logger.error(error_msg)
|
|
raise Exception(error_msg)
|
|
|
|
# Extract result content
|
|
result = response.get("result", {})
|
|
content = result.get("content", [])
|
|
|
|
# Convert MCP content to string for LLM
|
|
if not content:
|
|
return f"Tool '{tool_name}' returned no content"
|
|
|
|
# Combine all text content
|
|
text_parts = []
|
|
for item in content:
|
|
if item.get("type") == "text":
|
|
text_parts.append(item.get("text", ""))
|
|
|
|
result_text = "\n".join(text_parts) if text_parts else f"Tool '{tool_name}' executed successfully"
|
|
|
|
logger.info(f"Tool '{tool_name}' returned: {result_text[:100]}...")
|
|
return result_text
|
|
|
|
def get_tools_for_llm(self) -> List[Dict[str, Any]]:
|
|
"""
|
|
Get tools in OpenAI function format for LLM.
|
|
|
|
Returns:
|
|
List of tools in OpenAI format
|
|
"""
|
|
tools = self.discover_tools()
|
|
return [tool["function"] for tool in tools]
|
|
|
|
def health_check(self) -> bool:
|
|
"""
|
|
Check if MCP server is healthy.
|
|
|
|
Returns:
|
|
True if server is healthy, False otherwise
|
|
"""
|
|
try:
|
|
response = requests.get(
|
|
self.mcp_server_url.replace("/mcp", "/health"),
|
|
timeout=5
|
|
)
|
|
return response.status_code == 200
|
|
except Exception as e:
|
|
logger.error(f"Health check failed: {e}")
|
|
return False
|