This commit is contained in:
rkarahan80 2025-06-03 15:32:04 +03:00 committed by GitHub
commit 8159aade24
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
15 changed files with 925 additions and 47 deletions

View File

@ -94,6 +94,25 @@ PERPLEXITY_API_KEY=
# {"region": "us-east-1", "accessKeyId": "yourAccessKeyId", "secretAccessKey": "yourSecretAccessKey", "sessionToken": "yourSessionToken"}
AWS_BEDROCK_CONFIG=
# Azure OpenAI Credentials
# Find your API Key and Endpoint in the Azure Portal: Portal > Azure OpenAI > Your Resource > Keys and Endpoint
# Deployment Name is the name you give your model deployment in Azure OpenAI Studio.
AZURE_OPENAI_API_KEY=
AZURE_OPENAI_ENDPOINT=
AZURE_OPENAI_DEPLOYMENT_NAME=
# Vertex AI (Google Cloud) Credentials
# Project ID and Region can be found in the Google Cloud Console.
# Assumes Application Default Credentials (ADC) for authentication.
# For service account keys, you might need to set GOOGLE_APPLICATION_CREDENTIALS to the path of your JSON key file.
VERTEX_AI_PROJECT_ID=
VERTEX_AI_REGION=
# Granite AI Credentials
# Obtain your API Key and Base URL from your Granite AI provider.
GRANITE_AI_API_KEY=
GRANITE_AI_BASE_URL=
# Include this environment variable if you want more logging for debugging locally
VITE_LOG_LEVEL=debug

View File

@ -14,6 +14,29 @@ import { TbBrain, TbCloudComputing } from 'react-icons/tb';
import { BiCodeBlock, BiChip } from 'react-icons/bi';
import { FaCloud, FaBrain } from 'react-icons/fa';
import type { IconType } from 'react-icons';
import { VscKey } from 'react-icons/vsc'; // For API Key icon
import { TbBoxModel2 } from 'react-icons/tb'; // For Deployment Name icon
import { GrLocation } from 'react-icons/gr'; // For Region icon
import { AiOutlineProject } from 'react-icons/ai'; // For Project ID icon
// Placeholder SVG components (ideally these would be actual SVGs or from a library)
// Renders a branded "AZ" badge used as the Azure OpenAI provider icon.
function AzureOpenAIIcon() {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100" fill="#0072C6">
      <text x="50%" y="50%" dominantBaseline="middle" textAnchor="middle" fontFamily="Arial" fontSize="40" fill="white">AZ</text>
    </svg>
  );
}
// Renders a branded "VX" badge used as the Vertex AI provider icon.
function VertexAIIcon() {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100" fill="#4285F4">
      <text x="50%" y="50%" dominantBaseline="middle" textAnchor="middle" fontFamily="Arial" fontSize="40" fill="white">VX</text>
    </svg>
  );
}
// Renders a branded "GR" badge used as the Granite AI provider icon.
function GraniteAIIcon() {
  return (
    <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100" fill="#696969">
      <text x="50%" y="50%" dominantBaseline="middle" textAnchor="middle" fontFamily="Arial" fontSize="40" fill="white">GR</text>
    </svg>
  );
}
// Add type for provider names to ensure type safety
type ProviderName =
@ -30,7 +53,10 @@ type ProviderName =
| 'OpenRouter'
| 'Perplexity'
| 'Together'
| 'XAI';
| 'XAI'
| 'AzureOpenAI'
| 'VertexAI'
| 'GraniteAI';
// Update the PROVIDER_ICONS type to use the ProviderName type
const PROVIDER_ICONS: Record<ProviderName, IconType> = {
@ -48,17 +74,32 @@ const PROVIDER_ICONS: Record<ProviderName, IconType> = {
Perplexity: SiPerplexity,
Together: BsCloud,
XAI: BsRobot,
AzureOpenAI: AzureOpenAIIcon,
VertexAI: VertexAIIcon,
GraniteAI: GraniteAIIcon,
};
// Update PROVIDER_DESCRIPTIONS to use the same type
const PROVIDER_DESCRIPTIONS: Partial<Record<ProviderName, string>> = {
Anthropic: 'Access Claude and other Anthropic models',
OpenAI: 'Use GPT-4, GPT-3.5, and other OpenAI models',
AzureOpenAI: 'Microsoft Azure\'s OpenAI service for powerful AI models.',
VertexAI: 'Google Cloud\'s Vertex AI for custom machine learning models.',
GraniteAI: 'IBM Granite large language models.',
};
interface EditableFieldProps {
provider: IProviderConfig;
fieldKey: keyof IProviderConfig['settings'];
placeholder: string;
IconComponent?: IconType; // Optional icon for the field
isSecret?: boolean; // For fields like API keys
}
const CloudProvidersTab = () => {
const settings = useSettings();
const [editingProvider, setEditingProvider] = useState<string | null>(null);
const [editingField, setEditingField] = useState<string | null>(null); // To track which specific field is being edited
const [filteredProviders, setFilteredProviders] = useState<IProviderConfig[]>([]);
const [categoryEnabled, setCategoryEnabled] = useState<boolean>(false);
@ -116,20 +157,95 @@ const CloudProvidersTab = () => {
const handleUpdateBaseUrl = useCallback(
(provider: IProviderConfig, baseUrl: string) => {
const newBaseUrl: string | undefined = baseUrl.trim() || undefined;
// Update the provider settings in the store
settings.updateProviderSettings(provider.name, { ...provider.settings, baseUrl: newBaseUrl });
logStore.logProvider(`Base URL updated for ${provider.name}`, {
provider: provider.name,
baseUrl: newBaseUrl,
});
logStore.logProvider(`Base URL updated for ${provider.name}`, { provider: provider.name, baseUrl: newBaseUrl });
toast.success(`${provider.name} base URL updated`);
setEditingProvider(null);
setEditingProvider(null); // Keep this for baseUrl specific editing state
setEditingField(null);
},
[settings],
);
// Persist a single provider settings field (apiKey, deploymentName, region, ...).
// Trims the value, updates the store, logs the change, and closes any open
// inline editor. `field` is typed against IProviderConfig['settings'] so only
// real settings keys can be written.
const handleUpdateProviderSetting = useCallback(
  (provider: IProviderConfig, field: keyof IProviderConfig['settings'], value: string) => {
    const trimmedValue = value.trim();

    // Build the updated settings immutably. One targeted assertion replaces the
    // previous `any` escape hatch: the computed key is a union whose member
    // value types differ (e.g. `enabled: boolean`), so TS cannot prove the
    // string assignment for every member even though callers only pass
    // string-valued fields here.
    const newSettings = {
      ...provider.settings,
      [field]: trimmedValue,
    } as IProviderConfig['settings'];

    settings.updateProviderSettings(provider.name, newSettings);

    logStore.logProvider(`${field.toString()} updated for ${provider.name}`, {
      provider: provider.name,
      [field]: trimmedValue,
    });
    toast.success(`${provider.name} ${field.toString()} updated`);

    // Reset both editing states so the inline input closes.
    setEditingField(null); // Reset specific field editing state
    setEditingProvider(null); // Also reset provider-level editing state if any
  },
  [settings],
);
// Component for rendering individual editable provider fields (API keys,
// deployment names, project IDs, regions). Click-to-edit behavior: renders a
// read-only row until `currentEditingField` matches `${provider.name}-${fieldKey}`,
// then swaps in an uncontrolled <input> seeded with the stored value.
// Commits on Enter AND on blur; Escape cancels without saving.
// NOTE(review): this closes over handleUpdateProviderSetting, setEditingField
// and setEditingProvider from the enclosing CloudProvidersTab, so it must stay
// defined inside that component.
const EditableInput: React.FC<EditableFieldProps & { currentEditingField: string | null; onEditClick: (fieldKey: string) => void; }> = ({
  provider,
  fieldKey,
  placeholder,
  IconComponent,
  isSecret = false,
  currentEditingField,
  onEditClick,
}) => {
  // Current stored value for this settings field ('' when unset).
  const value = provider.settings[fieldKey] as string || '';

  // Secrets are masked in the read-only view; the real value still seeds the input.
  const displayValue = isSecret && value ? '••••••••' : value;

  return (
    <div className="flex items-center gap-2 mt-2">
      {currentEditingField === `${provider.name}-${fieldKey}` ? (
        <input
          type={isSecret ? 'password' : 'text'}
          defaultValue={value}
          placeholder={placeholder}
          className={classNames(
            'flex-1 px-3 py-1.5 rounded-lg text-sm',
            'bg-bolt-elements-background-depth-3 border border-bolt-elements-borderColor',
            'text-bolt-elements-textPrimary placeholder-bolt-elements-textTertiary',
            'focus:outline-none focus:ring-2 focus:ring-purple-500/30',
            'transition-all duration-200',
          )}
          onKeyDown={(e) => {
            // Enter commits the edit; Escape closes both editing states without saving.
            if (e.key === 'Enter') {
              handleUpdateProviderSetting(provider, fieldKey, e.currentTarget.value);
            } else if (e.key === 'Escape') {
              setEditingField(null);
              setEditingProvider(null);
            }
          }}
          onBlur={(e) => handleUpdateProviderSetting(provider, fieldKey, e.target.value)}
          autoFocus
        />
      ) : (
        <div
          className="flex-1 px-3 py-1.5 rounded-lg text-sm cursor-pointer group/url"
          onClick={() => {
            setEditingProvider(provider.name); // Keep track of provider being edited for general purposes
            setEditingField(`${provider.name}-${fieldKey}`); // Set the specific field being edited
            onEditClick(fieldKey); // Propagate edit click if needed
          }}
        >
          <div className="flex items-center gap-2 text-bolt-elements-textSecondary">
            {IconComponent && <IconComponent className="text-sm" />}
            <span className="group-hover/url:text-purple-500 transition-colors">
              {displayValue || placeholder}
            </span>
          </div>
        </div>
      )}
    </div>
  );
};
return (
<div className="space-y-6">
<motion.div
@ -234,9 +350,12 @@ const CloudProvidersTab = () => {
animate={{ opacity: 1, height: 'auto' }}
exit={{ opacity: 0, height: 0 }}
transition={{ duration: 0.2 }}
className="space-y-2" // Add space between inputs if multiple appear
>
{/* Base URL input - existing logic */}
{URL_CONFIGURABLE_PROVIDERS.includes(provider.name) && (
<div className="flex items-center gap-2 mt-4">
{editingProvider === provider.name ? (
{editingProvider === provider.name && editingField === `${provider.name}-baseUrl` ? (
<input
type="text"
defaultValue={provider.settings.baseUrl}
@ -253,6 +372,7 @@ const CloudProvidersTab = () => {
handleUpdateBaseUrl(provider, e.currentTarget.value);
} else if (e.key === 'Escape') {
setEditingProvider(null);
setEditingField(null);
}
}}
onBlur={(e) => handleUpdateBaseUrl(provider, e.target.value)}
@ -261,23 +381,97 @@ const CloudProvidersTab = () => {
) : (
<div
className="flex-1 px-3 py-1.5 rounded-lg text-sm cursor-pointer group/url"
onClick={() => setEditingProvider(provider.name)}
onClick={() => {
setEditingProvider(provider.name);
setEditingField(`${provider.name}-baseUrl`);
}}
>
<div className="flex items-center gap-2 text-bolt-elements-textSecondary">
<div className="i-ph:link text-sm" />
<span className="group-hover/url:text-purple-500 transition-colors">
{provider.settings.baseUrl || 'Click to set base URL'}
{provider.settings.baseUrl || `Click to set ${provider.name} base URL`}
</span>
</div>
</div>
)}
</div>
)}
{providerBaseUrlEnvKeys[provider.name]?.baseUrlKey && (
{/* Provider-specific fields */}
{provider.name === 'AzureOpenAI' && (
<>
<EditableInput
provider={provider}
fieldKey="apiKey"
placeholder="Enter API Key"
IconComponent={VscKey}
isSecret
currentEditingField={editingField}
onEditClick={() => setEditingField(`${provider.name}-apiKey`)}
/>
<EditableInput
provider={provider}
fieldKey="deploymentName"
placeholder="Enter Deployment Name"
IconComponent={TbBoxModel2}
currentEditingField={editingField}
onEditClick={() => setEditingField(`${provider.name}-deploymentName`)}
/>
</>
)}
{provider.name === 'VertexAI' && (
<>
<EditableInput
provider={provider}
fieldKey="projectId"
placeholder="Enter Project ID"
IconComponent={AiOutlineProject}
currentEditingField={editingField}
onEditClick={() => setEditingField(`${provider.name}-projectId`)}
/>
<EditableInput
provider={provider}
fieldKey="region"
placeholder="Enter Region (e.g., us-central1)"
IconComponent={GrLocation}
currentEditingField={editingField}
onEditClick={() => setEditingField(`${provider.name}-region`)}
/>
<p className="text-xs text-bolt-elements-textTertiary mt-1 px-3">
Vertex AI typically uses Application Default Credentials (ADC). API key field may be used for service account JSON.
</p>
</>
)}
{provider.name === 'GraniteAI' && (
<EditableInput
provider={provider}
fieldKey="apiKey"
placeholder="Enter API Key"
IconComponent={VscKey}
isSecret
currentEditingField={editingField}
onEditClick={() => setEditingField(`${provider.name}-apiKey`)}
/>
)}
{/* Display .env message if applicable for baseUrl */}
{URL_CONFIGURABLE_PROVIDERS.includes(provider.name) && providerBaseUrlEnvKeys[provider.name]?.baseUrlKey && (
<div className="mt-2 text-xs text-green-500">
<div className="flex items-center gap-1">
<div className="flex items-center gap-1 px-3"> {/* Added px-3 for alignment */}
<div className="i-ph:info" />
<span>Environment URL set in .env file</span>
<span>Base URL can be set via .env: {providerBaseUrlEnvKeys[provider.name]?.baseUrlKey}</span>
</div>
</div>
)}
{/* Display .env message for API keys if applicable */}
{providerBaseUrlEnvKeys[provider.name]?.apiTokenKey &&
(provider.name !== 'VertexAI') && /* Vertex AI handles auth differently */ (
<div className="mt-1 text-xs text-green-500">
<div className="flex items-center gap-1 px-3"> {/* Added px-3 for alignment */}
<div className="i-ph:info" />
<span>API Key can be set via .env: {providerBaseUrlEnvKeys[provider.name]?.apiTokenKey}</span>
</div>
</div>
)}

View File

@ -0,0 +1,108 @@
import React, { useState, useCallback, useRef } from 'react';
/**
 * Simple file-upload widget that POSTs a single document to
 * /api/document-upload as multipart form data and reports status inline.
 */
const FileUpload: React.FC = () => {
  // Currently selected file, or null when nothing is chosen.
  const [selectedFile, setSelectedFile] = useState<File | null>(null);
  // Status message shown under the button; empty string hides it.
  const [message, setMessage] = useState<string>('');
  // Whether `message` represents a failure (drives red vs. green styling).
  // Replaces the previous fragile `message.startsWith('Error')` sniffing,
  // which missed server-supplied error texts not beginning with "Error".
  const [isError, setIsError] = useState<boolean>(false);
  const [isUploading, setIsUploading] = useState<boolean>(false);
  // Ref to the file input so it can be cleared without document.getElementById.
  const fileInputRef = useRef<HTMLInputElement>(null);

  const handleFileChange = (event: React.ChangeEvent<HTMLInputElement>) => {
    if (event.target.files && event.target.files[0]) {
      setSelectedFile(event.target.files[0]);
      setMessage(''); // Clear previous messages
      setIsError(false);
    } else {
      setSelectedFile(null);
    }
  };

  const handleUpload = useCallback(async () => {
    if (!selectedFile) {
      setIsError(true);
      setMessage('Please select a file first.');
      return;
    }

    setIsUploading(true);
    setIsError(false);
    setMessage('Uploading...');

    const formData = new FormData();
    formData.append('document', selectedFile);

    try {
      const response = await fetch('/api/document-upload', {
        method: 'POST',
        body: formData,
        // 'Content-Type': 'multipart/form-data' is set automatically by the browser for FormData.
        // Add CSRF tokens or other custom headers if your application requires them.
      });

      // Servers can return non-JSON bodies (e.g. an HTML error page). Parse
      // defensively so a JSON syntax error does not mask the real HTTP status.
      let result: { message?: string; error?: string } = {};
      try {
        result = await response.json();
      } catch {
        // Non-JSON body — fall through to the status-code error below.
      }

      if (!response.ok) {
        // Use error message from server response if available
        throw new Error(result.error || `Upload failed with status: ${response.status}`);
      }

      setMessage(result.message || 'File uploaded successfully!');
      setSelectedFile(null); // Clear selection after successful upload

      // Clear the file input element itself so the same file can be re-selected.
      if (fileInputRef.current) {
        fileInputRef.current.value = '';
      }
    } catch (error) {
      setIsError(true);
      setMessage(error instanceof Error ? error.message : 'Error uploading file.');
      console.error('Upload error:', error);
    } finally {
      setIsUploading(false);
    }
  }, [selectedFile]);

  return (
    <div style={{ padding: '20px', fontFamily: 'Arial, sans-serif', border: '1px solid #ccc', borderRadius: '8px', maxWidth: '500px', margin: '20px auto' }}>
      <h3 style={{ marginTop: 0, marginBottom: '15px', color: '#333' }}>Upload Document for Knowledge Base</h3>
      <input
        id="file-upload-input"
        ref={fileInputRef}
        type="file"
        onChange={handleFileChange}
        accept=".txt,.pdf,.md,.zip,.json,.csv,.docx,.pptx,.xlsx" // Added more common types
        style={{ marginBottom: '10px', display: 'block' }}
        disabled={isUploading}
      />
      {selectedFile && (
        <p style={{ fontSize: '0.9em', color: '#555', margin: '10px 0' }}>
          Selected file: {selectedFile.name} ({(selectedFile.size / 1024).toFixed(2)} KB)
        </p>
      )}
      <button
        onClick={handleUpload}
        disabled={!selectedFile || isUploading}
        style={{
          padding: '10px 15px',
          backgroundColor: isUploading ? '#ccc' : '#007bff',
          color: 'white',
          border: 'none',
          borderRadius: '4px',
          cursor: (isUploading || !selectedFile) ? 'not-allowed' : 'pointer',
          opacity: (isUploading || !selectedFile) ? 0.6 : 1,
        }}
      >
        {isUploading ? 'Uploading...' : 'Upload'}
      </button>
      {message && (
        <p style={{
          marginTop: '15px',
          padding: '10px',
          backgroundColor: isError ? '#ffebee' : '#e8f5e9',
          color: isError ? '#c62828' : '#2e7d32',
          borderRadius: '4px',
          fontSize: '0.9em',
        }}>
          {message}
        </p>
      )}
    </div>
  );
};
export default FileUpload;

View File

@ -25,6 +25,7 @@ export * from './CloseButton';
export * from './CodeBlock';
export * from './EmptyState';
export * from './FileIcon';
export * from './FileUpload'; // Added FileUpload export
export * from './FilterChip';
export * from './GradientCard';
export * from './RepositoryStats';

View File

@ -0,0 +1,109 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createAzureOpenAI } from '@ai-sdk/openai';
/**
 * Azure OpenAI provider. Azure exposes no model catalogue here: the single
 * configured deployment is surfaced as the one selectable "model".
 *
 * NOTE(review): `createAzureOpenAI` comes from the '@ai-sdk/openai' import at
 * the top of this file; Azure support in the Vercel AI SDK is normally
 * `createAzure` from '@ai-sdk/azure' — confirm the import and factory name
 * against the installed SDK version.
 */
export default class AzureOpenAIProvider extends BaseProvider {
  name = 'AzureOpenAI';
  getApiKeyLink = 'https://azure.microsoft.com/en-us/services/cognitive-services/openai-service/';

  // Configuration keys for .env overrides or direct settings.
  config = {
    apiTokenKey: 'AZURE_OPENAI_API_KEY',
    baseUrlKey: 'AZURE_OPENAI_ENDPOINT',
    deploymentNameKey: 'AZURE_OPENAI_DEPLOYMENT_NAME',
    apiVersionKey: 'AZURE_OPENAI_API_VERSION', // Not a standard BaseProvider key, custom for Azure
  };

  staticModels: ModelInfo[] = []; // Models are dynamic based on deployment

  constructor() {
    super();
    // Constructor is light, config is applied in methods using providerSettings
  }

  /**
   * Resolve API key, endpoint, deployment name and API version, preferring
   * per-provider settings and falling back to environment variables; the API
   * version additionally falls back to the hard default '2023-05-15'.
   *
   * NOTE(review): relies on `this.getEnv(...)` — confirm BaseProvider actually
   * exposes such a helper; if it does not, these lookups fail at runtime.
   *
   * @throws Error when the API key, endpoint, or deployment name is missing.
   */
  private getAzureConfig(settings?: IProviderSetting): {
    apiKey: string;
    endpoint: string;
    deploymentName: string;
    apiVersion: string;
  } {
    const apiKey = settings?.apiKey || this.getEnv(this.config.apiTokenKey) || '';
    const endpoint = settings?.baseUrl || this.getEnv(this.config.baseUrlKey) || '';
    const deploymentName = settings?.deploymentName || this.getEnv(this.config.deploymentNameKey) || '';

    // Ensure apiVersion has a default if not provided in settings or .env
    const apiVersion = settings?.apiVersion || this.getEnv(this.config.apiVersionKey) || '2023-05-15';

    if (!apiKey) throw new Error(`Azure OpenAI API key is missing for provider ${this.name}.`);
    if (!endpoint) throw new Error(`Azure OpenAI endpoint (baseUrl) is missing for provider ${this.name}.`);
    if (!deploymentName) throw new Error(`Azure OpenAI deployment name is missing for provider ${this.name}.`);

    return { apiKey, endpoint, deploymentName, apiVersion };
  }

  /**
   * List the single configured deployment as this provider's only model.
   * Configuration errors thrown by getAzureConfig are swallowed on purpose:
   * an unconfigured provider simply reports no models.
   */
  async getDynamicModels( // Renamed from getModels to align with LLMManager
    _apiKeys?: Record<string, string>, // apiKeys can be sourced via settings if needed
    settings?: IProviderSetting,
    _serverEnv?: Record<string, string>,
  ): Promise<ModelInfo[]> {
    // For Azure, the "model" is the deployment.
    try {
      const config = this.getAzureConfig(settings);

      if (config.deploymentName) {
        return [
          {
            name: config.deploymentName, // Use deployment name as the model identifier
            label: `${config.deploymentName} (Azure Deployment)`,
            provider: this.name,
            maxTokenAllowed: 8000, // This is a default; ideally, it might come from Azure or be configurable.
          },
        ];
      }
    } catch (error) {
      // If config is incomplete, provider is not usable, return no models.
      return [];
    }

    return [];
  }

  /**
   * Build an AI SDK model instance for the configured deployment.
   * `options.model` is the UI-selected model, which for Azure is expected to
   * equal the configured deployment name (getDynamicModels lists only that one).
   *
   * @throws Error when provider settings are missing or incomplete.
   */
  getModelInstance(options: {
    model: string; // This will be the deploymentName for Azure
    serverEnv?: Env; // Access via this.getEnv() if needed
    apiKeys?: Record<string, string>; // Access via settings if needed
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1 {
    const azureSettings = options.providerSettings?.[this.name];

    if (!azureSettings) {
      throw new Error(`Configuration settings for ${this.name} are missing.`);
    }

    const { apiKey, endpoint, deploymentName, apiVersion } = this.getAzureConfig(azureSettings);

    // A mismatch should be impossible today (only one deployment is listed),
    // so warn rather than fail if the selected model differs from configuration.
    if (options.model !== deploymentName) {
      console.warn(`AzureOpenAI: Model selected (${options.model}) differs from configured deployment (${deploymentName}). Using selected model for SDK call.`);
    }

    const azure = createAzureOpenAI({
      endpoint,
      apiKey,
      apiVersion,
      deployment: options.model, // Use the model string passed, which is the deployment name
    });

    // The SDK instance is called with the model name (which is the deployment name here)
    return azure(options.model);
  }
}

View File

@ -0,0 +1,153 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
// We are not using a specific AI SDK for Granite, so no 'ai' package imports here for model instantiation.
// Options accepted by GraniteAIProvider.generate().
export interface GraniteAIProviderOptions {
  model: string; // Granite model identifier to invoke
  prompt: string; // User prompt, sent as a single chat message
  stream?: boolean; // Streaming flag — see generate(); not fully implemented
  providerSettings?: IProviderSetting; // Re-using IProviderSetting for consistency
  signal?: AbortSignal; // Optional abort signal forwarded to fetch
}
/**
 * Granite AI provider that talks to an OpenAI-compatible chat-completions
 * endpoint directly via fetch — it does not wrap an 'ai' SDK LanguageModelV1.
 */
export default class GraniteAIProvider extends BaseProvider {
  name = 'GraniteAI';

  // TODO: Update with actual link if available
  getApiKeyLink = 'https://www.ibm.com/granite'; // Placeholder

  config = {
    apiTokenKey: 'GRANITE_AI_API_KEY',
    baseUrlKey: 'GRANITE_AI_BASE_URL',
  };

  staticModels: ModelInfo[] = []; // Will be populated by getDynamicModels

  constructor() {
    super();
    // Constructor is light, config is applied in methods.
  }

  /**
   * Resolve API key and base URL from settings with .env fallbacks.
   * Missing values are warned about (not thrown) so the settings UI can render
   * an unconfigured provider; generate() enforces configuration.
   *
   * NOTE(review): relies on `this.getEnv(...)` — confirm BaseProvider exposes it.
   */
  private getGraniteConfig(settings?: IProviderSetting): {
    apiKey: string;
    baseUrl: string;
  } {
    const apiKey = settings?.apiKey || this.getEnv(this.config.apiTokenKey) || '';
    const baseUrl = settings?.baseUrl || this.getEnv(this.config.baseUrlKey) || '';

    if (!apiKey) {
      console.warn(`Granite AI API key is missing for provider ${this.name}.`);
    }

    if (!baseUrl) {
      console.warn(`Granite AI Base URL is missing for provider ${this.name}.`);
    }

    return { apiKey, baseUrl };
  }

  /**
   * List available models. Returns an empty list when the provider is not
   * configured; otherwise a hardcoded example entry for now.
   */
  async getDynamicModels(
    _apiKeys?: Record<string, string>,
    settings?: IProviderSetting,
    _serverEnv?: Record<string, string>,
  ): Promise<ModelInfo[]> {
    const config = this.getGraniteConfig(settings);

    if (!config.apiKey || !config.baseUrl) {
      // Provider not configured, return no models
      return [];
    }

    return [
      {
        id: 'granite-model-example', // Example model ID
        name: 'Granite Model (Example)',
        label: 'Granite Model (Example)', // label added for consistency with other providers' model entries
        provider: this.name,
        maxTokenAllowed: 8000, // Example token limit
      },
      // Add other Granite models if known
    ];
  }

  /**
   * Call the Granite chat-completions endpoint and return the assistant text.
   *
   * Streaming is not implemented: regardless of `options.stream`, the request
   * is sent with `stream: false` so the single JSON parse below is always
   * valid. (Previously the caller's flag was forwarded to the API while the
   * response was still parsed as one JSON document, which would break on a
   * real SSE stream.)
   *
   * @throws Error when unconfigured, on non-2xx responses, or on an
   *   unexpected response shape.
   */
  async generate(options: GraniteAIProviderOptions): Promise<string> {
    const { model, prompt, stream, providerSettings, signal } = options;
    const { apiKey, baseUrl } = this.getGraniteConfig(providerSettings);

    if (!apiKey || !baseUrl) {
      throw new Error(`Granite AI provider is not configured. Missing API key or base URL.`);
    }

    if (stream) {
      console.warn('GraniteAIProvider: Streaming requested but not fully implemented. Returning non-streamed response.');
    }

    // TODO: Confirm the actual API endpoint for Granite AI.
    // Trim trailing slashes so a baseUrl of "https://host/" does not yield "//v1/...".
    const apiEndpoint = `${baseUrl.replace(/\/+$/, '')}/v1/chat/completions`;

    const payload = {
      model,
      messages: [{ role: 'user', content: prompt }],
      stream: false, // streaming unsupported — see doc comment above
    };

    const response = await fetch(apiEndpoint, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        Authorization: `Bearer ${apiKey}`,
      },
      body: JSON.stringify(payload),
      signal,
    });

    if (!response.ok) {
      const errorBody = await response.text();
      throw new Error(`Granite AI API request failed with status ${response.status} ${response.statusText}: ${errorBody}`);
    }

    const jsonResponse = await response.json();

    // TODO: Adjust based on actual Granite AI response structure.
    // Common paths: choices[0].message.content or choices[0].text
    const messageContent = jsonResponse.choices?.[0]?.message?.content || jsonResponse.choices?.[0]?.text;

    if (typeof messageContent !== 'string') {
      console.error('Granite AI response format unexpected:', jsonResponse);
      throw new Error('Granite AI provider received an unexpected response format.');
    }

    return messageContent;
  }

  // getModelInstance is intentionally not implemented: Granite is driven
  // through the fetch-based generate() above rather than an 'ai' SDK model.
  // A future implementation would wrap generate() in an SDK-compatible object.
}

View File

@ -0,0 +1,99 @@
import { BaseProvider } from '~/lib/modules/llm/base-provider';
import type { ModelInfo } from '~/lib/modules/llm/types';
import type { IProviderSetting } from '~/types/model';
import type { LanguageModelV1 } from 'ai';
import { createVertex } from '@ai-sdk/google/vertex'; // Updated import for Vertex AI
/**
 * Google Cloud Vertex AI provider. Authentication relies on Application
 * Default Credentials (ADC); only project ID and region are configured here.
 *
 * NOTE(review): `createVertex` is imported from '@ai-sdk/google/vertex' at the
 * top of this file; the published Vercel AI SDK package is
 * '@ai-sdk/google-vertex' — confirm the import path against the installed SDK.
 */
export default class VertexAIProvider extends BaseProvider {
  name = 'VertexAI';
  getApiKeyLink = 'https://cloud.google.com/vertex-ai/docs/start/authentication';

  // Configuration keys for .env overrides or direct settings.
  // Vertex AI primarily uses ADC; an explicit API key is not typically used.
  config = {
    projectIdKey: 'VERTEX_AI_PROJECT_ID',
    regionKey: 'VERTEX_AI_REGION',
    // apiTokenKey could be a GOOGLE_APPLICATION_CREDENTIALS path, but the SDK handles ADC.
  };

  staticModels: ModelInfo[] = []; // Models are listed in getDynamicModels

  constructor() {
    super();
  }

  /**
   * Resolve project ID and region from settings with .env fallbacks.
   * Missing values are warned about here; getModelInstance enforces them.
   *
   * NOTE(review): relies on `this.getEnv(...)` — confirm BaseProvider exposes it.
   */
  private getVertexConfig(settings?: IProviderSetting): {
    projectId: string;
    region: string;
  } {
    const projectId = settings?.projectId || this.getEnv(this.config.projectIdKey) || '';
    const region = settings?.region || this.getEnv(this.config.regionKey) || '';

    if (!projectId) {
      console.warn(`Vertex AI Project ID is missing for provider ${this.name}.`);
    }

    if (!region) {
      console.warn(`Vertex AI Region is missing for provider ${this.name}.`);
    }

    return { projectId, region };
  }

  /**
   * Return the supported Gemini models. The list is hardcoded for now; calling
   * getVertexConfig() first surfaces warnings when project/region are missing
   * (the list is still returned — getModelInstance enforces configuration).
   * The previous empty `if (!projectId || !region) {}` dead branch is removed.
   */
  async getDynamicModels(
    _apiKeys?: Record<string, string>,
    settings?: IProviderSetting,
    _serverEnv?: Record<string, string>,
  ): Promise<ModelInfo[]> {
    // Triggers missing-config warnings; the static list below needs no values.
    this.getVertexConfig(settings);

    return [
      { name: 'gemini-1.5-pro-preview-0409', label: 'Gemini 1.5 Pro (latest preview)', provider: this.name, maxTokenAllowed: 1048576 }, // Example token limit
      { name: 'gemini-1.0-pro', label: 'Gemini 1.0 Pro', provider: this.name, maxTokenAllowed: 32768 },
      { name: 'gemini-1.0-pro-vision', label: 'Gemini 1.0 Pro Vision', provider: this.name, maxTokenAllowed: 16384 },
      { name: 'gemini-flash-preview-0514', label: 'Gemini 1.5 Flash (latest preview)', provider: this.name, maxTokenAllowed: 1048576 },
      // Add other relevant models here
    ].map((m) => ({ ...m, id: m.name })); // 'id' mirrors 'name' — presumably expected by the manager; TODO confirm
  }

  /**
   * Instantiate an AI SDK model for the given Vertex model ID.
   * @throws Error when provider settings or project/region are missing.
   */
  getModelInstance(options: {
    model: string; // Vertex AI model ID, e.g. 'gemini-1.0-pro'
    serverEnv?: Env;
    apiKeys?: Record<string, string>;
    providerSettings?: Record<string, IProviderSetting>;
  }): LanguageModelV1 {
    const vertexSettings = options.providerSettings?.[this.name];

    if (!vertexSettings) {
      throw new Error(`Configuration settings for ${this.name} are missing.`);
    }

    const { projectId, region } = this.getVertexConfig(vertexSettings);

    if (!projectId || !region) {
      throw new Error(`Vertex AI Project ID or Region is not configured for provider ${this.name}. Cannot instantiate model.`);
    }

    const vertex = createVertex({
      project: projectId,
      location: region,
      // The SDK handles ADC; a service-account key JSON is typically supplied
      // via the GOOGLE_APPLICATION_CREDENTIALS environment variable.
    });

    // options.model is the specific model identifier like 'gemini-1.0-pro'
    return vertex(options.model);
  }
}

View File

@ -15,25 +15,31 @@ import TogetherProvider from './providers/together';
import XAIProvider from './providers/xai';
import HyperbolicProvider from './providers/hyperbolic';
import AmazonBedrockProvider from './providers/amazon-bedrock';
import AzureOpenAIProvider from './providers/azure-openai';
import GithubProvider from './providers/github';
import GraniteAIProvider from './providers/granite-ai'; // Added GraniteAIProvider
import VertexAIProvider from './providers/vertex-ai';
export {
AmazonBedrockProvider,
AnthropicProvider,
AzureOpenAIProvider,
CohereProvider,
DeepseekProvider,
GoogleProvider,
GraniteAIProvider, // Added GraniteAIProvider here for alphabetical order
GroqProvider,
HuggingFaceProvider,
HyperbolicProvider,
LMStudioProvider,
MistralProvider,
OllamaProvider,
OpenAIProvider,
OpenRouterProvider,
OpenAILikeProvider,
PerplexityProvider,
XAIProvider,
TogetherProvider,
LMStudioProvider,
AmazonBedrockProvider,
VertexAIProvider,
XAIProvider,
GithubProvider,
};

View File

@ -29,7 +29,7 @@ export interface Shortcuts {
toggleTerminal: Shortcut;
}
export const URL_CONFIGURABLE_PROVIDERS = ['Ollama', 'LMStudio', 'OpenAILike'];
export const URL_CONFIGURABLE_PROVIDERS = ['Ollama', 'LMStudio', 'OpenAILike', 'AzureOpenAI', 'GraniteAI'];
export const LOCAL_PROVIDERS = ['OpenAILike', 'LMStudio', 'Ollama'];
export type ProviderSetting = Record<string, IProviderConfig>;
@ -77,6 +77,87 @@ const getInitialProviderSettings = (): ProviderSetting => {
};
});
// Seed settings for providers that PROVIDER_LIST does not yet register.
// Workaround: wiring these into LLMManager is out of scope for this subtask,
// so fallback defaults are injected here, only when not already present.
const fallbackProviders = {
  AzureOpenAI: {
    name: 'AzureOpenAI',
    icon: 'AzureOpenAIIcon', // Placeholder icon
    config: {
      apiTokenKey: 'AZURE_OPENAI_API_KEY',
      baseUrlKey: 'AZURE_OPENAI_ENDPOINT',
      // Azure specific field
      deploymentNameKey: 'AZURE_OPENAI_DEPLOYMENT_NAME',
    },
    settings: {
      enabled: false,
      apiKey: '',
      baseUrl: '',
      deploymentName: '',
      apiVersion: '2023-05-15', // Default Azure OpenAI API version
      projectId: '', // Not used by Azure; kept for the common settings shape
      region: '', // Not used by Azure; kept for the common settings shape
    },
    getApiKeyLink: 'https://azure.microsoft.com/en-us/services/cognitive-services/openai-service/',
    labelForGetApiKey: 'Get Azure OpenAI API Key',
    staticModels: [],
    getDynamicModels: false,
    provider: 'azure', // Assumed provider identifier
    isLocal: false, // Cloud-hosted provider
  },
  VertexAI: {
    name: 'VertexAI',
    icon: 'VertexAIIcon', // Placeholder icon
    config: {
      // Vertex AI authenticates via ADC or a service account key rather than
      // a single API-token env var, and has no single base URL like the others.
      // apiTokenKey: 'GOOGLE_APPLICATION_CREDENTIALS', // Or handle differently
      projectIdKey: 'VERTEX_AI_PROJECT_ID',
      regionKey: 'VERTEX_AI_REGION',
    },
    settings: {
      enabled: false,
      apiKey: '', // Might represent a service account key path — handled differently
      baseUrl: '', // Not applicable in the same way as other providers
      projectId: '',
      region: '',
      deploymentName: '', // Not typically used by Vertex
    },
    getApiKeyLink: 'https://cloud.google.com/vertex-ai/docs/start/authentication',
    labelForGetApiKey: 'Configure Vertex AI Authentication',
    staticModels: [],
    getDynamicModels: false,
    provider: 'google', // Assumed provider identifier
    isLocal: false, // Cloud-hosted provider
  },
  GraniteAI: {
    name: 'GraniteAI',
    icon: 'GraniteAIIcon', // Placeholder icon
    config: {
      apiTokenKey: 'GRANITE_AI_API_KEY',
      baseUrlKey: 'GRANITE_AI_BASE_URL',
    },
    settings: {
      enabled: false,
      apiKey: '',
      baseUrl: '',
      projectId: '', // Not used by Granite; kept for the common settings shape
      region: '', // Not used by Granite; kept for the common settings shape
      deploymentName: '', // Not used by Granite
    },
    getApiKeyLink: 'https://www.granite.com/ai/api-keys', // Placeholder URL
    labelForGetApiKey: 'Get Granite AI API Key',
    staticModels: [],
    getDynamicModels: false,
    provider: 'granite', // Assumed provider identifier
    isLocal: false, // Cloud-hosted provider
  },
};
// Insertion order matches the original per-provider checks: Azure, Vertex, Granite.
for (const [providerName, defaults] of Object.entries(fallbackProviders)) {
  if (!initialSettings[providerName]) {
    initialSettings[providerName] = defaults;
  }
}
// Only try to load from localStorage in the browser
if (isBrowser) {
const savedSettings = localStorage.getItem(PROVIDER_SETTINGS_KEY);

View File

@ -0,0 +1,55 @@
import { type ActionFunctionArgs, json } from '@remix-run/cloudflare';
import { createScopedLogger } from '~/utils/logger'; // Assuming this utility exists
const logger = createScopedLogger('api.document-upload');
/**
 * POST handler for document uploads.
 *
 * Expects a multipart/form-data body with a single file under the "document"
 * field, validates it, and acknowledges receipt. Actual storage and knowledge
 * base processing are not implemented yet (see TODOs below).
 *
 * @returns 405 for non-POST, 400 for missing/invalid file or unparsable body,
 *          500 for unexpected failures, otherwise file metadata as JSON.
 */
export async function action({ request, context }: ActionFunctionArgs) {
  if (request.method !== 'POST') {
    logger.warn(`Method not allowed: ${request.method}`);
    return json({ error: 'Method not allowed' }, { status: 405 });
  }

  try {
    // Accessing cloudflare bindings if needed (e.g. for R2 storage later)
    // const env = context.cloudflare?.env as any;

    const form = await request.formData();
    const uploaded = form.get('document');

    // The "document" entry must be a real File, not a plain string field.
    if (!(uploaded instanceof File)) {
      logger.warn('No file found in upload or "document" is not a File object.');
      return json({ error: 'No document found in upload or it is not a file.' }, { status: 400 });
    }

    // Reject files with no name or no content (further type checks could go here).
    if (!uploaded.name || uploaded.size === 0) {
      logger.warn(`Invalid file properties: Name: ${uploaded.name}, Size: ${uploaded.size}`);
      return json({ error: 'Invalid file. Name or size is missing.' }, { status: 400 });
    }

    logger.info(`Received file upload. Name: ${uploaded.name}, Type: ${uploaded.type}, Size: ${uploaded.size} bytes.`);

    // TODO: Implement actual file storage (e.g., to Supabase Storage, Cloudflare R2 using `env.YOUR_R2_BUCKET.put(...)`)
    // TODO: Implement file processing (e.g., parsing, embedding for RAG)

    // For now, just acknowledge receipt and echo the file's metadata.
    return json({
      message: `File '${uploaded.name}' received and acknowledged. Processing and knowledge base integration are pending.`,
      filename: uploaded.name,
      size: uploaded.size,
      type: uploaded.type,
    });
  } catch (error) {
    logger.error('Error processing document upload:', error);

    // A malformed body surfaces as a formData() parse failure — report it as a client error.
    if (error instanceof Error && error.message.includes('Failed to parse multipart body')) {
      return json({ error: 'Invalid request body. Ensure it is a multipart/form-data request.' }, { status: 400 });
    }

    return json(
      {
        error: 'Failed to process document upload.',
        details: error instanceof Error ? error.message : String(error),
      },
      { status: 500 },
    );
  }
}

View File

@ -2,9 +2,10 @@ import { type ActionFunctionArgs } from '@remix-run/cloudflare';
import { streamText } from '~/lib/.server/llm/stream-text';
import type { IProviderSetting, ProviderInfo } from '~/types/model';
import { generateText } from 'ai';
import { PROVIDER_LIST } from '~/utils/constants';
import { PROVIDER_LIST } from '~/utils/constants'; // PROVIDER_LIST might be less relevant if using LLMManager fully
import { MAX_TOKENS } from '~/lib/.server/llm/constants';
import { LLMManager } from '~/lib/modules/llm/manager';
import GraniteAIProvider from '~/lib/modules/llm/providers/granite-ai'; // Import GraniteAIProvider
import type { ModelInfo } from '~/lib/modules/llm/types';
import { getApiKeysFromCookie, getProviderSettingsFromCookie } from '~/lib/api/cookies';
import { createScopedLogger } from '~/utils/logger';
@ -103,39 +104,63 @@ async function llmCallAction({ context, request }: ActionFunctionArgs) {
const dynamicMaxTokens = modelDetails && modelDetails.maxTokenAllowed ? modelDetails.maxTokenAllowed : MAX_TOKENS;
const providerInfo = PROVIDER_LIST.find((p) => p.name === provider.name);
// Get LLMManager instance and the actual provider instance
const llmManager = LLMManager.getInstance(context.cloudflare?.env as any);
const actualProviderInstance = llmManager.getProvider(provider.name);
if (!providerInfo) {
throw new Error('Provider not found');
if (!actualProviderInstance) {
// This check replaces the old providerInfo check using PROVIDER_LIST
throw new Error(`Provider ${provider.name} not found or not registered in LLMManager.`);
}
logger.info(`Generating response Provider: ${provider.name}, Model: ${modelDetails.name}`);
logger.info(`Generating response with Provider: ${provider.name}, Model: ${modelDetails.name}`);
const result = await generateText({
system,
messages: [
{
role: 'user',
content: `${message}`,
},
],
model: providerInfo.getModelInstance({
if (actualProviderInstance instanceof GraniteAIProvider) {
logger.info(`Using GraniteAIProvider direct generate for Model: ${modelDetails.name}`);
const graniteResultText = await actualProviderInstance.generate({
model: modelDetails.name,
serverEnv: context.cloudflare?.env as any,
apiKeys,
providerSettings,
}),
maxTokens: dynamicMaxTokens,
toolChoice: 'none',
});
logger.info(`Generated response`);
prompt: message,
providerSettings: providerSettings?.[provider.name],
// signal: request.signal, // Pass signal if needed
});
return new Response(JSON.stringify(result), {
status: 200,
headers: {
'Content-Type': 'application/json',
},
});
const responsePayload = {
text: graniteResultText,
toolCalls: [],
finishReason: 'stop', // Or derive from actual Granite response if available
usage: { promptTokens: 0, completionTokens: 0, totalTokens: 0 } // Placeholder
};
return new Response(JSON.stringify(responsePayload), {
status: 200,
headers: { 'Content-Type': 'application/json' },
});
} else if (typeof actualProviderInstance.getModelInstance === 'function') {
logger.info(`Using AI SDK generateText for Provider: ${provider.name}, Model: ${modelDetails.name}`);
const result = await generateText({
system,
messages: [ { role: 'user', content: `${message}` } ],
model: actualProviderInstance.getModelInstance({ // Use actualProviderInstance here
model: modelDetails.name,
serverEnv: context.cloudflare?.env as any,
apiKeys,
providerSettings, // Pass the whole providerSettings object
}),
maxTokens: dynamicMaxTokens,
toolChoice: 'none',
});
logger.info(`Generated response with AI SDK`);
return new Response(JSON.stringify(result), {
status: 200,
headers: { 'Content-Type': 'application/json' },
});
} else {
logger.error(`Provider ${provider.name} does not have a getModelInstance method and is not GraniteAIProvider.`);
throw new Response(`Provider ${provider.name} is not configured correctly for generating text.`, {
status: 500,
statusText: 'Internal Server Error'
});
}
} catch (error: unknown) {
console.log(error);

View File

@ -22,6 +22,22 @@ PROVIDER_LIST.forEach((provider) => {
};
});
// Manually register env-key mappings for providers the LLMManager does not
// yet know about. Workaround: manager changes are out of scope for this
// subtask. VertexAI is omitted — it has no comparable base URL key.
const manualEnvKeys = {
  AzureOpenAI: {
    baseUrlKey: 'AZURE_OPENAI_ENDPOINT',
    apiTokenKey: 'AZURE_OPENAI_API_KEY', // Assumed API token key for Azure
  },
  GraniteAI: {
    baseUrlKey: 'GRANITE_AI_BASE_URL',
    apiTokenKey: 'GRANITE_AI_API_KEY', // Assumed API token key for Granite
  },
};
for (const [providerName, envKeys] of Object.entries(manualEnvKeys)) {
  if (!providerBaseUrlEnvKeys[providerName]) {
    providerBaseUrlEnvKeys[providerName] = envKeys;
  }
}
// starter Templates
export const STARTER_TEMPLATES: Template[] = [

View File

@ -0,0 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100">
<!-- Placeholder Azure OpenAI provider icon: solid blue square with an "AZ" monogram.
     TODO(review): replace with the official logo asset. -->
<rect width="100" height="100" fill="#0072C6"/>
<text x="50%" y="50%" dominant-baseline="middle" text-anchor="middle" font-family="Arial" font-size="40" fill="white">AZ</text>
</svg>

After

Width:  |  Height:  |  Size: 250 B

View File

@ -0,0 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100">
<!-- Placeholder Granite AI provider icon: solid gray square with a "GR" monogram.
     TODO(review): replace with the official logo asset. -->
<rect width="100" height="100" fill="#696969"/>
<text x="50%" y="50%" dominant-baseline="middle" text-anchor="middle" font-family="Arial" font-size="40" fill="white">GR</text>
</svg>

After

Width:  |  Height:  |  Size: 250 B

View File

@ -0,0 +1,4 @@
<svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 100 100">
<!-- Placeholder Vertex AI provider icon: solid blue square with a "VX" monogram.
     TODO(review): replace with the official logo asset. -->
<rect width="100" height="100" fill="#4285F4"/>
<text x="50%" y="50%" dominant-baseline="middle" text-anchor="middle" font-family="Arial" font-size="40" fill="white">VX</text>
</svg>

After

Width:  |  Height:  |  Size: 250 B