"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
    return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
exports.ollamaRoutes = void 0;
const express_1 = require("express");
const ollama_1 = __importDefault(require("ollama"));
exports.ollamaRoutes = (0, express_1.Router)();
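// REST endpoints for a locally running Ollama instance: one-shot generation,
// SSE streaming, vision-model prompts, and model listing. The mount path is
// chosen by the host app (e.g. app.use('/api/ollama', ollamaRoutes); the
// '/api/ollama' prefix is an assumption, not defined in this file).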
// Generate text with Ollama (single, non-streaming response)
exports.ollamaRoutes.post('/generate', async (req, res) => {
    try {
        const { model, prompt } = req.body;
        if (!model || !prompt) {
            return res.status(400).json({ error: 'model and prompt are required' });
        }
        const response = await ollama_1.default.chat({
            model,
            messages: [{ role: 'user', content: prompt }],
            stream: false,
        });
        res.json({
            content: response.message.content,
            model: response.model
        });
    }
    catch (error) {
        console.error('Ollama generate error:', error);
        res.status(500).json({
            error: 'Failed to generate content',
            details: error instanceof Error ? error.message : String(error)
        });
    }
});
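// Example request, assuming the router is mounted at /api/ollama and the app
// listens on port 3000 (both are assumptions made for illustration):
//
//   curl -X POST http://localhost:3000/api/ollama/generate \
//     -H 'Content-Type: application/json' \
//     -d '{"model": "llama3", "prompt": "Say hello"}'
//
// The handler above responds with { content, model }.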
// Stream text generation as Server-Sent Events (SSE)
exports.ollamaRoutes.post('/stream', async (req, res) => {
    try {
        const { model, prompt } = req.body;
        if (!model || !prompt) {
            return res.status(400).json({ error: 'model and prompt are required' });
        }
        res.setHeader('Content-Type', 'text/event-stream');
        res.setHeader('Cache-Control', 'no-cache');
        res.setHeader('Connection', 'keep-alive');
        const stream = await ollama_1.default.chat({
            model,
            messages: [{ role: 'user', content: prompt }],
            stream: true,
        });
        // Forward each chunk of the model's reply as an SSE "data:" event
        for await (const chunk of stream) {
            const content = chunk.message?.content || '';
            if (content) {
                res.write(`data: ${JSON.stringify({ content })}\n\n`);
            }
        }
        res.write('data: [DONE]\n\n');
        res.end();
    }
    catch (error) {
        console.error('Ollama stream error:', error);
        // If the failure happened before anything was flushed, send a normal
        // error response; otherwise report the error in-band and close the stream.
        if (!res.headersSent) {
            res.setHeader('Content-Type', 'application/json');
            return res.status(500).json({ error: 'Streaming failed' });
        }
        res.write(`data: ${JSON.stringify({ error: 'Streaming failed' })}\n\n`);
        res.end();
    }
});
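// Sketch of how a client might consume this stream. EventSource only supports
// GET, so a browser client would POST with fetch and read the body manually
// (illustrative only; the endpoint URL is an assumption, and this would run
// inside an async function):
//
//   const res = await fetch('/api/ollama/stream', {
//       method: 'POST',
//       headers: { 'Content-Type': 'application/json' },
//       body: JSON.stringify({ model: 'llama3', prompt: 'Say hello' }),
//   });
//   const reader = res.body.getReader();
//   const decoder = new TextDecoder();
//   while (true) {
//       const { done, value } = await reader.read();
//       if (done) break;
//       // Each event arrives as "data: {...}\n\n"; parse lines as needed.
//       console.log(decoder.decode(value));
//   }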
// Prompt a vision-capable model (note: models such as llama3.2-vision interpret
// images supplied as input; Ollama itself does not generate image files)
exports.ollamaRoutes.post('/image', async (req, res) => {
    try {
        const { prompt, model = 'llama3.2-vision' } = req.body;
        if (!prompt) {
            return res.status(400).json({ error: 'prompt is required' });
        }
        // The generate API returns the model's text output in response.response
        const response = await ollama_1.default.generate({
            model,
            prompt,
        });
        res.json({
            content: response.response,
            model
        });
    }
    catch (error) {
        console.error('Image generation error:', error);
        res.status(500).json({
            error: 'Failed to generate image',
            details: error instanceof Error ? error.message : String(error)
        });
    }
});
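// With multimodal models, an input image can be passed to generate() through the
// `images` field as a base64-encoded string. A minimal sketch; the variable
// base64Png is assumed to already hold an encoded image:
//
//   const reply = await ollama_1.default.generate({
//       model: 'llama3.2-vision',
//       prompt: 'Describe this image',
//       images: [base64Png],
//   });
//   console.log(reply.response);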
// List available models
exports.ollamaRoutes.get('/models', async (req, res) => {
    try {
        const response = await ollama_1.default.list();
        res.json({ models: response.models });
    }
    catch (error) {
        console.error('Model list error:', error);
        res.status(500).json({
            error: 'Failed to list models',
            details: error instanceof Error ? error.message : String(error)
        });
    }
});
//# sourceMappingURL=ollama.js.map