Implement batch processing for AI analysis in job search parser
- Introduced batch processing to handle job analysis in smaller groups, reducing the risk of timeouts. - Added logging for batch processing status, including success and error messages. - Implemented fallback results for failed batches to ensure continuity in analysis results.
This commit is contained in:
parent
c692853c38
commit
8f526b3518
@ -204,11 +204,37 @@ async function startJobSearchParser(options = {}) {
|
||||
timestamp: job.extractedAt || job.postedDate || "",
|
||||
}));
|
||||
|
||||
analysisResults = await analyzeBatch(
|
||||
analysisData,
|
||||
AI_CONTEXT,
|
||||
OLLAMA_MODEL
|
||||
);
|
||||
// Process in smaller batches to avoid timeouts (5 jobs per batch by default).
// AI_BATCH_SIZE may override the size; anything that is not a positive
// integer falls back to 5 — a zero/negative step would otherwise make the
// `i += BATCH_SIZE` loop below stall or run forever.
const envBatchSize = Number.parseInt(process.env.AI_BATCH_SIZE, 10);
const BATCH_SIZE =
  Number.isInteger(envBatchSize) && envBatchSize > 0 ? envBatchSize : 5;
analysisResults = [];

// Loop-invariant: total number of batches, used only for progress logging.
const totalBatches = Math.ceil(analysisData.length / BATCH_SIZE);

for (let i = 0; i < analysisData.length; i += BATCH_SIZE) {
  const batch = analysisData.slice(i, i + BATCH_SIZE);
  const batchNumber = Math.floor(i / BATCH_SIZE) + 1;

  logger.info(` Processing batch ${batchNumber}/${totalBatches} (${batch.length} jobs)...`);

  try {
    const batchResults = await analyzeBatch(
      batch,
      AI_CONTEXT,
      OLLAMA_MODEL
    );
    analysisResults.push(...batchResults);
    logger.success(` ✅ Batch ${batchNumber} completed`);
  } catch (error) {
    logger.error(` ❌ Batch ${batchNumber} failed: ${error.message}`);
    // Add fallback results for this batch: mark each job as tentatively
    // relevant with low confidence so downstream code keeps one result per
    // job and index alignment with analysisData is preserved.
    const fallbackResults = batch.map((_, idx) => ({
      postIndex: i + idx + 1, // 1-based index into the overall job list
      isRelevant: true,
      confidence: 0.3,
      reasoning: `Analysis failed: ${error.message}`,
    }));
    analysisResults.push(...fallbackResults);
  }
}
|
||||
|
||||
// Embed AI analysis into each job result
|
||||
allResults.forEach((job, index) => {
|
||||
|
||||
Loading…
x
Reference in New Issue
Block a user