Improve web search and cron scheduling

- Update web search to use ddgs package (renamed from duckduckgo_search)
- Add ddgs>=9.0.0 to dependencies in pyproject.toml
- Fix cron tool to handle recurring jobs with duration limits
  - When both every_seconds and in_seconds are provided, create multiple
    one-time jobs instead of ignoring every_seconds
  - Fixes issue where 'remind me every X for Y duration' only created
    a single reminder instead of multiple recurring reminders
This commit is contained in:
tanyar09 2026-03-04 13:18:13 -05:00
parent 192b975861
commit edb409bb0c
3 changed files with 83 additions and 47 deletions

View File

@@ -131,6 +131,34 @@ class CronTool(Tool):
# Build schedule - prioritize 'in_seconds' for relative time, then 'at' for absolute time
delete_after = False
# Special case: recurring job with duration limit (every_seconds + in_seconds)
if every_seconds is not None and in_seconds is not None:
# Create multiple one-time jobs for "every X seconds for Y seconds"
from datetime import datetime, timedelta
num_jobs = max(1, in_seconds // every_seconds)
results = []
for i in range(num_jobs):
job_time = datetime.now() + timedelta(seconds=i * every_seconds)
job_at = job_time.isoformat()
try:
dt = datetime.fromisoformat(job_at)
at_ms = int(dt.timestamp() * 1000)
schedule = CronSchedule(kind="at", at_ms=at_ms)
job = self._cron.add_job(
name=f"{message[:25]} ({i+1}/{num_jobs})" if num_jobs > 1 else message[:30],
schedule=schedule,
message=message,
deliver=True,
channel=channel,
to=chat_id,
delete_after_run=True,
reminder=reminder,
)
results.append(f"Created job '{job.name}' (id: {job.id})")
except Exception as e:
results.append(f"Error creating job {i+1}: {str(e)}")
return f"Created {len([r for r in results if 'Created' in r])} reminder(s):\n" + "\n".join(results)
# Handle relative time (in_seconds) - compute datetime automatically
if in_seconds is not None:
from datetime import datetime, timedelta

View File

@@ -101,9 +101,9 @@ class WebSearchTool(Tool):
try:
n = min(max(count or self.max_results, 1), 10)
# Try using duckduckgo_search library if available
# Try using ddgs library if available (renamed from duckduckgo_search)
try:
from duckduckgo_search import DDGS
from ddgs import DDGS
with DDGS() as ddgs:
results = []
for r in ddgs.text(query, max_results=n):
@@ -112,7 +112,7 @@ class WebSearchTool(Tool):
"url": r.get("href", ""),
"description": r.get("body", "")
})
if not results:
return f"No results found for: {query}"
@@ -123,51 +123,58 @@ class WebSearchTool(Tool):
lines.append(f" {item['description']}")
return "\n".join(lines)
except ImportError:
# Fallback: use DuckDuckGo instant answer API (simpler, but limited)
async with httpx.AsyncClient(
follow_redirects=True,
timeout=15.0
) as client:
# Use DuckDuckGo instant answer API (no key needed)
url = "https://api.duckduckgo.com/"
r = await client.get(
url,
params={"q": query, "format": "json", "no_html": "1", "skip_disambig": "1"},
headers={"User-Agent": USER_AGENT},
)
r.raise_for_status()
data = r.json()
results = []
# Get RelatedTopics (search results)
if "RelatedTopics" in data:
for topic in data["RelatedTopics"][:n]:
if "Text" in topic and "FirstURL" in topic:
results.append({
"title": topic.get("Text", "").split(" - ")[0] if " - " in topic.get("Text", "") else topic.get("Text", "")[:50],
"url": topic.get("FirstURL", ""),
"description": topic.get("Text", "")
})
# Also check AbstractText for direct answer
if "AbstractText" in data and data["AbstractText"]:
results.insert(0, {
"title": data.get("Heading", query),
"url": data.get("AbstractURL", ""),
"description": data.get("AbstractText", "")
})
if not results:
return f"No results found for: {query}. Try installing 'duckduckgo-search' package for better results: pip install duckduckgo-search"
lines = [f"Results for: {query}\n"]
for i, item in enumerate(results[:n], 1):
lines.append(f"{i}. {item['title']}\n {item['url']}")
if item['description']:
lines.append(f" {item['description']}")
return "\n".join(lines)
# ddgs package not installed, fall through to fallback
pass
except Exception as e:
# Log ddgs errors but fall through to fallback API
import logging
logging.debug(f"ddgs search error: {e}")
# Fallback: use DuckDuckGo instant answer API (simpler, but limited)
async with httpx.AsyncClient(
follow_redirects=True,
timeout=15.0
) as client:
# Use DuckDuckGo instant answer API (no key needed)
url = "https://api.duckduckgo.com/"
r = await client.get(
url,
params={"q": query, "format": "json", "no_html": "1", "skip_disambig": "1"},
headers={"User-Agent": USER_AGENT},
)
r.raise_for_status()
data = r.json()
results = []
# Get RelatedTopics (search results)
if "RelatedTopics" in data:
for topic in data["RelatedTopics"][:n]:
if "Text" in topic and "FirstURL" in topic:
results.append({
"title": topic.get("Text", "").split(" - ")[0] if " - " in topic.get("Text", "") else topic.get("Text", "")[:50],
"url": topic.get("FirstURL", ""),
"description": topic.get("Text", "")
})
# Also check AbstractText for direct answer
if "AbstractText" in data and data["AbstractText"]:
results.insert(0, {
"title": data.get("Heading", query),
"url": data.get("AbstractURL", ""),
"description": data.get("AbstractText", "")
})
if not results:
return f"No results found for: {query}. Try installing 'ddgs' package for better results: pip install ddgs"
lines = [f"Results for: {query}\n"]
for i, item in enumerate(results[:n], 1):
lines.append(f"{i}. {item['title']}\n {item['url']}")
if item['description']:
lines.append(f" {item['description']}")
return "\n".join(lines)
except Exception as e:
return f"Error searching: {e}. Try installing 'duckduckgo-search' package: pip install duckduckgo-search"
return f"Error searching: {e}. Try installing 'ddgs' package: pip install ddgs"
class WebFetchTool(Tool):

View File

@@ -24,6 +24,7 @@ dependencies = [
"websockets>=12.0",
"websocket-client>=1.6.0",
"httpx>=0.25.0",
"ddgs>=9.0.0",
"oauth-cli-kit>=0.1.1",
"loguru>=0.7.0",
"readability-lxml>=0.8.0",