import { describe, expect, it } from "vitest";
import { appRouter } from "./routers";
import type { TrpcContext } from "./_core/context";
function createPublicContext(): TrpcContext {
|
|
return {
|
|
user: null,
|
|
req: {
|
|
protocol: "https",
|
|
headers: {},
|
|
} as TrpcContext["req"],
|
|
res: {
|
|
clearCookie: () => {},
|
|
} as TrpcContext["res"],
|
|
};
|
|
}
describe("ollama API integration", () => {
|
|
it("health check returns connected status and latency", async () => {
|
|
const ctx = createPublicContext();
|
|
const caller = appRouter.createCaller(ctx);
|
|
|
|
const result = await caller.ollama.health();
|
|
|
|
expect(result).toHaveProperty("connected");
|
|
expect(result).toHaveProperty("latencyMs");
|
|
expect(typeof result.connected).toBe("boolean");
|
|
expect(typeof result.latencyMs).toBe("number");
|
|
// API should be reachable
|
|
expect(result.connected).toBe(true);
|
|
});
|
|
|
|
it("models endpoint returns a list of models", async () => {
|
|
const ctx = createPublicContext();
|
|
const caller = appRouter.createCaller(ctx);
|
|
|
|
const result = await caller.ollama.models();
|
|
|
|
expect(result).toHaveProperty("success");
|
|
expect(result.success).toBe(true);
|
|
expect(result).toHaveProperty("models");
|
|
expect(Array.isArray(result.models)).toBe(true);
|
|
expect(result.models.length).toBeGreaterThan(0);
|
|
|
|
// Each model should have an id
|
|
expect(result.models[0]).toHaveProperty("id");
|
|
expect(typeof result.models[0].id).toBe("string");
|
|
});
|
|
|
|
it("chat endpoint sends a message and gets a response", async () => {
|
|
const ctx = createPublicContext();
|
|
const caller = appRouter.createCaller(ctx);
|
|
|
|
// First get available models
|
|
const modelsResult = await caller.ollama.models();
|
|
expect(modelsResult.success).toBe(true);
|
|
expect(modelsResult.models.length).toBeGreaterThan(0);
|
|
|
|
const modelId = modelsResult.models[0].id;
|
|
|
|
const result = await caller.ollama.chat({
|
|
model: modelId,
|
|
messages: [
|
|
{ role: "user", content: "Reply with exactly the word: hello" },
|
|
],
|
|
temperature: 0,
|
|
max_tokens: 20,
|
|
});
|
|
|
|
expect(result).toHaveProperty("success");
|
|
// The chat call itself should succeed (no network error)
|
|
// Response content may vary depending on model availability
|
|
if (result.success) {
|
|
expect(typeof result.response).toBe("string");
|
|
// Model responded (even if empty for some cloud models)
|
|
expect(result.model).toBeTruthy();
|
|
} else {
|
|
// If it failed, there should be an error message
|
|
expect(result.error).toBeTruthy();
|
|
}
|
|
}, 60_000);
|
|
});