mejoras varias en frontend, separación de intent y state, pick de artículos

This commit is contained in:
Lucas Tettamanti
2026-01-06 15:50:02 -03:00
parent dab52492b4
commit 8bb21b4edb
17 changed files with 1826 additions and 209 deletions

View File

@@ -1,9 +1,25 @@
import OpenAI from "openai";
import { z } from "zod";
const apiKey = process.env.OPENAI_API_KEY || process.env.OPENAI_APIKEY;
let _client = null;
let _clientKey = null;
export const openai = new OpenAI({ apiKey });
/**
 * Resolve the OpenAI API key from the environment.
 * Checks OPENAI_API_KEY first, then the legacy OPENAI_APIKEY spelling.
 * @returns {string|null} the key, or null when neither variable is set.
 */
function getApiKey() {
  const { OPENAI_API_KEY, OPENAI_APIKEY } = process.env;
  return OPENAI_API_KEY || OPENAI_APIKEY || null;
}
/**
 * Lazily build and memoize the OpenAI client.
 * The cached instance is reused while the API key stays the same and is
 * rebuilt if the key changes between calls (e.g. env reloaded in tests).
 * @returns {OpenAI} a ready-to-use client.
 * @throws {Error} with `code === "OPENAI_NO_KEY"` when no key is configured.
 */
function getClient() {
  const apiKey = getApiKey();
  if (!apiKey) {
    const noKeyError = new Error("OPENAI_API_KEY is not set");
    noKeyError.code = "OPENAI_NO_KEY";
    throw noKeyError;
  }
  const cacheIsFresh = _client && _clientKey === apiKey;
  if (!cacheIsFresh) {
    _clientKey = apiKey;
    _client = new OpenAI({ apiKey });
  }
  return _client;
}
const NextStateSchema = z.enum([
"IDLE",
@@ -53,6 +69,19 @@ const PlanSchema = z
})
.strict();
// One item mentioned by the user: a non-empty free-text label plus a
// positive quantity expressed in kilograms, grams, or discrete units.
const ExtractItemSchema = z.object({
  label: z.string().min(1),
  unit: z.enum(["kg", "g", "unit"]),
  quantity: z.number().positive(),
});
// Step-1 extraction payload: the classified intent plus any items found.
// `.strict()` rejects unknown keys; `items` defaults to an empty array
// when the model omits it.
const ExtractSchema = z.object({
  intent: IntentSchema,
  items: z.array(ExtractItemSchema).default([]),
}).strict();
function extractJsonObject(text) {
const s = String(text || "");
const i = s.indexOf("{");
@@ -61,36 +90,29 @@ function extractJsonObject(text) {
return null;
}
/**
* Genera un "plan" de conversación (salida estructurada) usando OpenAI.
*
* - `promptSystem`: instrucciones del bot
* - `input`: { last_user_message, conversation_history, current_conversation_state, context }
*/
export async function llmPlan({ promptSystem, input, model } = {}) {
if (!apiKey) {
const err = new Error("OPENAI_API_KEY is not set");
err.code = "OPENAI_NO_KEY";
throw err;
}
async function jsonCompletion({ system, user, model }) {
const openai = getClient();
const chosenModel = model || process.env.OPENAI_MODEL || "gpt-4o-mini";
const debug = String(process.env.LLM_DEBUG || "") === "1";
if (debug) console.log("[llm] openai.request", { model: chosenModel });
const resp = await openai.chat.completions.create({
model: chosenModel,
temperature: 0.2,
response_format: { type: "json_object" },
messages: [
{
role: "system",
content:
`${promptSystem}\n\n` +
"Respondé SOLO con un JSON válido (sin markdown). Respetá estrictamente el formato requerido.",
},
{ role: "user", content: JSON.stringify(input ?? {}) },
{ role: "system", content: system },
{ role: "user", content: user },
],
});
if (debug)
console.log("[llm] openai.response", {
id: resp?.id || null,
model: resp?.model || null,
usage: resp?.usage || null,
});
const text = resp?.choices?.[0]?.message?.content || "";
let parsed;
try {
@@ -100,12 +122,51 @@ export async function llmPlan({ promptSystem, input, model } = {}) {
if (!extracted) throw new Error("openai_invalid_json");
parsed = JSON.parse(extracted);
}
return { parsed, raw_text: text, model: chosenModel, usage: resp?.usage || null };
}
/**
 * Generates a structured conversation "plan" using OpenAI.
 *
 * @param {Object} [options]
 * @param {string} options.promptSystem - System instructions for the bot.
 * @param {Object} options.input - { last_user_message, conversation_history, current_conversation_state, context }
 * @param {string} [options.model] - Optional model override (falls back to env/default inside jsonCompletion).
 * @returns {Promise<{plan: Object, raw_text: string, model: string, usage: Object|null}>}
 * @throws {Error} "OPENAI_NO_KEY" when no API key is configured; a Zod error
 *   when the model output does not match PlanSchema.
 */
export async function llmPlan({ promptSystem, input, model } = {}) {
  const system =
    `${promptSystem}\n\n` +
    "Respondé SOLO con un JSON válido (sin markdown). Respetá estrictamente el formato requerido.";
  const { parsed, raw_text, model: chosenModel, usage } = await jsonCompletion({
    system,
    user: JSON.stringify(input ?? {}),
    model,
  });
  // Validate the raw JSON against the expected plan shape before returning.
  // (The previous revision also emitted stale `raw_text: text` / `usage: resp?...`
  // lines referencing identifiers that no longer exist in this scope.)
  const plan = PlanSchema.parse(parsed);
  return {
    plan,
    raw_text,
    model: chosenModel,
    usage,
  };
}
/**
 * Step 1: extract the user's intent plus the items they mention,
 * without resolving any IDs.
 * Returns ONLY: intent + items[{label, quantity, unit}].
 *
 * @param {Object} [options]
 * @param {Object} options.input - Payload serialized as the user message.
 * @param {string} [options.model] - Optional model override.
 * @returns {Promise<{extracted: Object, raw_text: string, model: string, usage: Object|null}>}
 */
export async function llmExtract({ input, model } = {}) {
  // Instruction lines joined with "\n" — identical to concatenating each
  // line with a trailing newline except the last.
  const system = [
    "Extraé intención e items del mensaje del usuario.",
    "Respondé SOLO JSON válido (sin markdown) con keys EXACTAS:",
    `intent (one of: ${IntentSchema.options.join("|")}), items (array of {label, quantity, unit(kg|g|unit)}).`,
    "Si no hay items claros, devolvé items: [].",
  ].join("\n");
  const completion = await jsonCompletion({
    system,
    user: JSON.stringify(input ?? {}),
    model,
  });
  // Enforce the strict intent+items shape on whatever the model produced.
  const extracted = ExtractSchema.parse(completion.parsed);
  return {
    extracted,
    raw_text: completion.raw_text,
    model: completion.model,
    usage: completion.usage,
  };
}