/**
 * Payment Specialist - extracts the customer's payment method from a chat
 * message. Uses cheap regex detection first and only falls back to an LLM
 * call when the message is long or ambiguous.
 */
import OpenAI from "openai";
import { loadPrompt } from "../promptLoader.js";
import { validatePayment, getValidationErrors, createEmptyNlu } from "../schemas.js";

// Lazily created, module-wide OpenAI client (built on first getClient() call).
let _client = null;

/**
 * Returns the shared OpenAI client, creating it on first use.
 *
 * @returns {OpenAI} memoized client instance
 * @throws {Error} if no API key is present in the environment
 */
function getClient() {
  // Accept either env-var spelling.
  const apiKey = process.env.OPENAI_API_KEY || process.env.OPENAI_APIKEY;
  if (!apiKey) {
    throw new Error("OPENAI_API_KEY is not set");
  }
  if (!_client) {
    _client = new OpenAI({ apiKey });
  }
  return _client;
}

/**
 * Extracts the first top-level JSON object embedded in a string.
 * Scans from the first "{" to the last "}" so it tolerates prose or code
 * fences surrounding the JSON.
 *
 * @param {string} text - Raw LLM output.
 * @returns {Object|null} parsed object, or null when no valid JSON is found
 */
function extractJson(text) {
  const s = String(text || "");
  const i = s.indexOf("{");
  const j = s.lastIndexOf("}");
  if (i >= 0 && j > i) {
    try {
      return JSON.parse(s.slice(i, j + 1));
    } catch {
      return null;
    }
  }
  return null;
}

/**
 * Detects a payment method from simple text patterns (no LLM).
 *
 * @param {string} text - User message (coerced to string, lowercased, trimmed).
 * @returns {"cash"|"link"|null} detected method, or null when ambiguous
 */
function detectPaymentMethod(text) {
  const t = String(text || "").toLowerCase().trim();

  // Bare numbers (assuming the prior bot prompt offered 1=cash, 2=link).
  if (/^1$/.test(t)) return "cash";
  if (/^2$/.test(t)) return "link";

  // Cash patterns.
  if (/\b(efectivo|cash|plata|billete|cuando (llega|llegue)|en mano)\b/i.test(t)) {
    return "cash";
  }

  // Link / electronic-payment patterns.
  if (/\b(tarjeta|link|transfer|qr|mercadopago|mp|d[eé]bito|cr[eé]dito)\b/i.test(t)) {
    return "link";
  }

  return null;
}

/**
 * Processes a payment-selection message and returns a unified NLU result.
 *
 * Fast path: a short message matching an unambiguous pattern skips the LLM
 * entirely. Otherwise the tenant's "payment" prompt is loaded and the LLM is
 * asked for a JSON answer; if that answer is missing or fails schema
 * validation, the regex-detection result is used as a fallback and the
 * returned `validation.ok` flag is set to false.
 *
 * @param {Object} params
 * @param {number} params.tenantId - Tenant ID (used to load the prompt).
 * @param {string} params.text - User message.
 * @param {Object} [params.storeConfig] - Store config, passed as prompt variables.
 * @returns {Promise<Object>} { nlu, raw_text, model, usage, validation }
 * @throws {Error} if no OpenAI API key is configured (via getClient)
 */
export async function paymentNlu({ tenantId, text, storeConfig = {} }) {
  // Coerce once so nullish / non-string input (e.g. a bare number) cannot
  // crash .trim() below or reach the API as a non-string message.
  const safeText = String(text || "");

  // Try cheap pattern detection first.
  const quickMethod = detectPaymentMethod(safeText);

  // Clearly a number or a short simple pattern: skip the LLM call.
  if (quickMethod && safeText.trim().length < 30) {
    const nlu = createEmptyNlu();
    nlu.intent = "select_payment";
    nlu.confidence = 0.9;
    nlu.entities.payment_method = quickMethod;
    return {
      nlu,
      raw_text: "",
      model: null,
      usage: null,
      validation: { ok: true, skipped_llm: true },
    };
  }

  const openai = getClient();

  // Load the tenant's payment prompt.
  const { content: systemPrompt, model } = await loadPrompt({
    tenantId,
    promptKey: "payment",
    variables: storeConfig,
  });

  // Call the LLM, forcing a JSON-object response.
  const response = await openai.chat.completions.create({
    model: model || "gpt-4o-mini",
    temperature: 0.1,
    max_tokens: 100,
    response_format: { type: "json_object" },
    messages: [
      { role: "system", content: systemPrompt },
      { role: "user", content: safeText },
    ],
  });

  const rawText = response?.choices?.[0]?.message?.content || "";
  let parsed = extractJson(rawText);

  // Validate the LLM output against the payment schema.
  const llmOutputValid = Boolean(parsed) && validatePayment(parsed);
  if (!llmOutputValid) {
    // Fallback: rely on the pattern-based detection (may be null).
    parsed = {
      intent: "select_payment",
      payment_method: quickMethod,
    };
  }

  // Convert to the unified NLU format.
  const nlu = createEmptyNlu();
  nlu.intent = "select_payment";
  nlu.confidence = 0.85;
  nlu.entities.payment_method = parsed.payment_method || null;
  nlu.needs.catalog_lookup = false;

  return {
    nlu,
    raw_text: rawText,
    model,
    usage: response?.usage || null,
    // BUGFIX: previously this always reported { ok: true }, even when the
    // LLM output failed validation and the regex fallback was used.
    validation: { ok: llmOutputValid, used_fallback: !llmOutputValid },
  };
}