mirror of
https://github.com/Mintplex-Labs/anything-llm
synced 2026-04-25 17:15:37 +02:00
Microsoft Foundry Local LLM provider & agent provider (#4435)
* add microsoft foundry local llm and agent providers
* minor change to fix early stop token + overloading of context window
  - always use the user-defined window _unless_ it is larger than the model's real context window
  - cache the context windows when we can from the API (0.7.*)
  - unload the model forcefully on model change to prevent resource hogging
* add back token preference since some models have very large windows and can crash a machine
* normalize cases

---------

Co-authored-by: Timothy Carambat <rambat1010@gmail.com>
@@ -102,7 +102,9 @@ AnythingLLM divides your documents into objects called `workspaces`. A Workspace
- [Novita AI (chat models)](https://novita.ai/model-api/product/llm-api?utm_source=github_anything-llm&utm_medium=github_readme&utm_campaign=link)
- [PPIO](https://ppinfra.com?utm_source=github_anything-llm)
- [Moonshot AI](https://www.moonshot.ai/)
- [Microsoft Foundry Local](https://github.com/microsoft/Foundry-Local)
- [CometAPI (chat models)](https://api.cometapi.com/)

**Embedder models:**

- [AnythingLLM Native Embedder](/server/storage/models/README.md) (default)
@@ -144,6 +144,11 @@ GID='1000'
# MOONSHOT_AI_API_KEY='your-moonshot-api-key-here'
# MOONSHOT_AI_MODEL_PREF='moonshot-v1-32k'

# LLM_PROVIDER='foundry'
# FOUNDRY_BASE_PATH='http://127.0.0.1:55776'
# FOUNDRY_MODEL_PREF='phi-3.5-mini'
# FOUNDRY_MODEL_TOKEN_LIMIT=4096

###########################################
######## Embedding API SELECTION ##########
###########################################
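Note (not part of the diff): Foundry Local binds its OpenAI-compatible endpoint to a dynamically chosen local port, so `55776` above is only an example value; if it does not respond, check the running service for the actual endpoint (for example, the Foundry Local CLI's `foundry service status` reports it) before setting `FOUNDRY_BASE_PATH`.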
frontend/src/components/LLMSelection/FoundryOptions/index.jsx (new file, 110 lines)
@@ -0,0 +1,110 @@
import { useEffect, useState } from "react";
import System from "@/models/system";

export default function FoundryOptions({ settings }) {
  const [models, setModels] = useState([]);
  const [loading, setLoading] = useState(!!settings?.FoundryBasePath);
  const [basePath, setBasePath] = useState(settings?.FoundryBasePath);
  const [model, setModel] = useState(settings?.FoundryModelPref || "");

  useEffect(() => {
    setModel(settings?.FoundryModelPref || "");
  }, [settings?.FoundryModelPref]);

  useEffect(() => {
    async function fetchModels() {
      try {
        setLoading(true);
        if (!basePath) throw new Error("Base path is required");
        const { models, error } = await System.customModels(
          "foundry",
          null,
          basePath
        );
        if (error) throw new Error(error);
        setModels(models);
      } catch (error) {
        console.error("Error fetching Foundry models:", error);
        setModels([]);
      } finally {
        setLoading(false);
      }
    }
    fetchModels();
  }, [basePath]);

  return (
    <div className="flex flex-col gap-y-7">
      <div className="flex gap-[36px] mt-1.5 flex-wrap">
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-3">
            Base URL
          </label>
          <input
            type="url"
            name="FoundryBasePath"
            className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
            placeholder="eg: http://127.0.0.1:8080"
            defaultValue={settings?.FoundryBasePath}
            required={true}
            autoComplete="off"
            spellCheck={false}
            onChange={(e) => setBasePath(e.target.value)}
          />
        </div>
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-3">
            Chat Model
          </label>
          {loading ? (
            <select
              name="FoundryModelPref"
              required={true}
              disabled={true}
              className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
            >
              <option>---- Loading ----</option>
            </select>
          ) : (
            <select
              name="FoundryModelPref"
              value={model}
              onChange={(e) => setModel(e.target.value)}
              required={true}
              className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
            >
              {models.length > 0 ? (
                <>
                  <option value="">-- Select a model --</option>
                  {models.map((model) => (
                    <option key={model.id} value={model.id}>
                      {model.id}
                    </option>
                  ))}
                </>
              ) : (
                <option disabled value="">
                  No models found
                </option>
              )}
            </select>
          )}
        </div>
        <div className="flex flex-col w-60">
          <label className="text-white text-sm font-semibold block mb-3">
            Token Context Window
          </label>
          <input
            type="number"
            name="FoundryModelTokenLimit"
            className="border-none bg-theme-settings-input-bg text-white placeholder:text-theme-settings-input-placeholder text-sm rounded-lg focus:outline-primary-button active:outline-primary-button outline-none block w-full p-2.5"
            placeholder="4096"
            defaultValue={settings?.FoundryModelTokenLimit}
            autoComplete="off"
            min={0}
          />
        </div>
      </div>
    </div>
  );
}
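As a reading aid (not part of the diff): a sketch of the contract this component assumes from `System.customModels`, with hypothetical values. The server half of that call is the `getFoundryModels` helper further down in this diff.

// Hypothetical call and response shape, inferred from the component above:
const { models, error } = await System.customModels(
  "foundry",               // provider
  null,                    // apiKey: Foundry Local does not use one
  "http://127.0.0.1:55776" // basePath typed into the Base URL field
);
// On success: error === null and models is e.g. [{ id: "phi-3.5-mini", ... }]
// On failure: the component logs the error and falls back to an empty list.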
frontend/src/media/llmprovider/foundry-local.png (new binary file, 18 KiB; binary file not shown)
@@ -34,6 +34,7 @@ import PPIOLogo from "@/media/llmprovider/ppio.png";
import DellProAiStudioLogo from "@/media/llmprovider/dpais.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import CometApiLogo from "@/media/llmprovider/cometapi.png";
import FoundryLogo from "@/media/llmprovider/foundry-local.png";

import PreLoader from "@/components/Preloader";
import OpenAiOptions from "@/components/LLMSelection/OpenAiOptions";
@@ -65,6 +66,7 @@ import NvidiaNimOptions from "@/components/LLMSelection/NvidiaNimOptions";
import PPIOLLMOptions from "@/components/LLMSelection/PPIOLLMOptions";
import DellProAiStudioOptions from "@/components/LLMSelection/DPAISOptions";
import MoonshotAiOptions from "@/components/LLMSelection/MoonshotAiOptions";
import FoundryOptions from "@/components/LLMSelection/FoundryOptions";

import LLMItem from "@/components/LLMSelection/LLMItem";
import { CaretUpDown, MagnifyingGlass, X } from "@phosphor-icons/react";
@@ -313,6 +315,18 @@ export const AVAILABLE_LLM_PROVIDERS = [
    description: "500+ AI Models all in one API.",
    requiredConfig: ["CometApiLLMApiKey"],
  },
  {
    name: "Microsoft Foundry Local",
    value: "foundry",
    logo: FoundryLogo,
    options: (settings) => <FoundryOptions settings={settings} />,
    description: "Run Microsoft's Foundry models locally.",
    requiredConfig: [
      "FoundryBasePath",
      "FoundryModelPref",
      "FoundryModelTokenLimit",
    ],
  },
  {
    name: "xAI",
    value: "xai",
@@ -40,6 +40,7 @@ import PGVectorLogo from "@/media/vectordbs/pgvector.png";
import DPAISLogo from "@/media/llmprovider/dpais.png";
import MoonshotAiLogo from "@/media/llmprovider/moonshotai.png";
import CometApiLogo from "@/media/llmprovider/cometapi.png";
import FoundryLogo from "@/media/llmprovider/foundry-local.png";

import React, { useState, useEffect } from "react";
import paths from "@/utils/paths";
@@ -261,6 +262,13 @@ export const LLM_SELECTION_PRIVACY = {
    ],
    logo: CometApiLogo,
  },
  foundry: {
    name: "Microsoft Foundry Local",
    description: [
      "Your model and chats are only accessible on the machine running Foundry Local",
    ],
    logo: FoundryLogo,
  },
};

export const VECTOR_DB_PRIVACY = {
@@ -143,6 +143,11 @@ SIG_SALT='salt' # Please generate random string at least 32 chars long.
# MOONSHOT_AI_API_KEY='your-moonshot-api-key-here'
# MOONSHOT_AI_MODEL_PREF='moonshot-v1-32k'

# LLM_PROVIDER='foundry'
# FOUNDRY_BASE_PATH='http://127.0.0.1:55776'
# FOUNDRY_MODEL_PREF='phi-3.5-mini'
# FOUNDRY_MODEL_TOKEN_LIMIT=4096

###########################################
######## Embedding API SELECTION ##########
###########################################
@@ -569,6 +569,11 @@ const SystemSettings = {
  GenericOpenAiKey: !!process.env.GENERIC_OPEN_AI_API_KEY,
  GenericOpenAiMaxTokens: process.env.GENERIC_OPEN_AI_MAX_TOKENS,

  // Foundry Keys
  FoundryBasePath: process.env.FOUNDRY_BASE_PATH,
  FoundryModelPref: process.env.FOUNDRY_MODEL_PREF,
  FoundryModelTokenLimit: process.env.FOUNDRY_MODEL_TOKEN_LIMIT,

  AwsBedrockLLMConnectionMethod:
    process.env.AWS_BEDROCK_LLM_CONNECTION_METHOD || "iam",
  AwsBedrockLLMAccessKeyId: !!process.env.AWS_BEDROCK_LLM_ACCESS_KEY_ID,
server/utils/AiProviders/foundry/index.js (new file, 288 lines)
@@ -0,0 +1,288 @@
const { NativeEmbedder } = require("../../EmbeddingEngines/native");
const {
  LLMPerformanceMonitor,
} = require("../../helpers/chat/LLMPerformanceMonitor");
const {
  handleDefaultStreamResponseV2,
  formatChatHistory,
} = require("../../helpers/chat/responses");
const { OpenAI: OpenAIApi } = require("openai");

class FoundryLLM {
  /** @see FoundryLLM.cacheContextWindows */
  static modelContextWindows = {};

  constructor(embedder = null, modelPreference = null) {
    if (!process.env.FOUNDRY_BASE_PATH)
      throw new Error("No Foundry Base Path was set.");

    this.className = "FoundryLLM";
    this.model = modelPreference || process.env.FOUNDRY_MODEL_PREF;
    this.openai = new OpenAIApi({
      baseURL: parseFoundryBasePath(process.env.FOUNDRY_BASE_PATH),
      apiKey: null,
    });

    this.embedder = embedder ?? new NativeEmbedder();
    this.defaultTemp = 0.7;
    FoundryLLM.cacheContextWindows(true).then(() => {
      this.limits = {
        history: this.promptWindowLimit() * 0.15,
        system: this.promptWindowLimit() * 0.15,
        user: this.promptWindowLimit() * 0.7,
      };

      this.#log(
        `Loaded with model: ${this.model} with context window: ${this.promptWindowLimit()}`
      );
    });
  }

  static #slog(text, ...args) {
    console.log(`\x1b[36m[FoundryLLM]\x1b[0m ${text}`, ...args);
  }

  #log(text, ...args) {
    console.log(`\x1b[36m[${this.className}]\x1b[0m ${text}`, ...args);
  }

  #appendContext(contextTexts = []) {
    if (!contextTexts || !contextTexts.length) return "";
    return (
      "\nContext:\n" +
      contextTexts
        .map((text, i) => {
          return `[CONTEXT ${i}]:\n${text}\n[END CONTEXT ${i}]\n\n`;
        })
        .join("")
    );
  }

  streamingEnabled() {
    return "streamGetChatCompletion" in this;
  }

  /**
   * Cache the context windows for the Foundry models.
   * This is done once and then cached for the lifetime of the server. This is absolutely necessary to ensure that the context windows are correct.
   * Foundry Local has a weird behavior where, when max_completion_tokens is unset, it will only allow the output to be 1024 tokens.
   *
   * If you pass in too large a max_completion_tokens, it will throw an error.
   * If you pass in too small a max_completion_tokens, you will get stubbed outputs before you reach a real "stop" token.
   * So we need to cache the context windows and use them for the lifetime of the server.
   * @param {boolean} force
   * @returns
   */
  static async cacheContextWindows(force = false) {
    try {
      // Skip if we already have cached context windows and we're not forcing a refresh
      if (Object.keys(FoundryLLM.modelContextWindows).length > 0 && !force)
        return;

      const openai = new OpenAIApi({
        baseURL: parseFoundryBasePath(process.env.FOUNDRY_BASE_PATH),
        apiKey: null,
      });
      (await openai.models.list().then((result) => result.data)).map(
        (model) => {
          const contextWindow =
            Number(model.maxInputTokens) + Number(model.maxOutputTokens);
          FoundryLLM.modelContextWindows[model.id] = contextWindow;
        }
      );
      FoundryLLM.#slog(`Context windows cached for all models!`);
    } catch (e) {
      FoundryLLM.#slog(`Error caching context windows: ${e.message}`);
      return;
    }
  }

  /**
   * Unload a model from the Foundry engine forcefully.
   * If the model is invalid, we just ignore the error. This is a util
   * simply to have the Foundry engine drop the resources for the model.
   *
   * @param {string} modelName
   * @returns {Promise<boolean>}
   */
  static async unloadModelFromEngine(modelName) {
    const basePath = parseFoundryBasePath(process.env.FOUNDRY_BASE_PATH);
    const baseUrl = new URL(basePath);
    baseUrl.pathname = `/openai/unload/${modelName}`;
    baseUrl.searchParams.set("force", "true");
    return await fetch(baseUrl.toString())
      .then((res) => res.json())
      .catch(() => null);
  }

  static promptWindowLimit(modelName) {
    let userDefinedLimit = null;
    const systemDefinedLimit =
      Number(this.modelContextWindows[modelName]) || 4096;

    if (
      process.env.FOUNDRY_MODEL_TOKEN_LIMIT &&
      !isNaN(Number(process.env.FOUNDRY_MODEL_TOKEN_LIMIT)) &&
      Number(process.env.FOUNDRY_MODEL_TOKEN_LIMIT) > 0
    )
      userDefinedLimit = Number(process.env.FOUNDRY_MODEL_TOKEN_LIMIT);

    // The user-defined limit takes priority over the context window limit, but it cannot exceed it,
    // so we return the minimum of the two; if there is no user-defined limit, we return the system-defined limit as-is.
    if (userDefinedLimit !== null)
      return Math.min(userDefinedLimit, systemDefinedLimit);
    return systemDefinedLimit;
  }

  promptWindowLimit() {
    return this.constructor.promptWindowLimit(this.model);
  }

  async isValidChatCompletionModel(_ = "") {
    return true;
  }

  /**
   * Generates appropriate content array for a message + attachments.
   * @param {{userPrompt:string, attachments: import("../../helpers").Attachment[]}}
   * @returns {string|object[]}
   */
  #generateContent({ userPrompt, attachments = [] }) {
    if (!attachments.length) {
      return userPrompt;
    }

    const content = [{ type: "text", text: userPrompt }];
    for (let attachment of attachments) {
      content.push({
        type: "image_url",
        image_url: {
          url: attachment.contentString,
          detail: "auto",
        },
      });
    }
    return content.flat();
  }

  /**
   * Construct the user prompt for this model.
   * @param {{attachments: import("../../helpers").Attachment[]}} param0
   * @returns
   */
  constructPrompt({
    systemPrompt = "",
    contextTexts = [],
    chatHistory = [],
    userPrompt = "",
    attachments = [],
  }) {
    const prompt = {
      role: "system",
      content: `${systemPrompt}${this.#appendContext(contextTexts)}`,
    };
    return [
      prompt,
      ...formatChatHistory(chatHistory, this.#generateContent),
      {
        role: "user",
        content: this.#generateContent({ userPrompt, attachments }),
      },
    ];
  }

  async getChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!this.model)
      throw new Error(
        `Foundry chat: ${this.model} is not a valid or defined model for chat completion!`
      );

    const result = await LLMPerformanceMonitor.measureAsyncFunction(
      this.openai.chat.completions
        .create({
          model: this.model,
          messages,
          temperature,
          max_completion_tokens: this.promptWindowLimit(),
        })
        .catch((e) => {
          throw new Error(e.message);
        })
    );

    if (
      !result.output.hasOwnProperty("choices") ||
      result.output.choices.length === 0
    )
      return null;

    return {
      textResponse: result.output.choices[0].message.content,
      metrics: {
        prompt_tokens: result.output.usage.prompt_tokens || 0,
        completion_tokens: result.output.usage.completion_tokens || 0,
        total_tokens: result.output.usage.total_tokens || 0,
        outputTps: result.output.usage.completion_tokens / result.duration,
        duration: result.duration,
      },
    };
  }

  async streamGetChatCompletion(messages = null, { temperature = 0.7 }) {
    if (!this.model)
      throw new Error(
        `Foundry chat: ${this.model} is not a valid or defined model for chat completion!`
      );

    const measuredStreamRequest = await LLMPerformanceMonitor.measureStream(
      this.openai.chat.completions.create({
        model: this.model,
        stream: true,
        messages,
        temperature,
        max_completion_tokens: this.promptWindowLimit(),
      }),
      messages
    );
    return measuredStreamRequest;
  }

  handleStream(response, stream, responseProps) {
    return handleDefaultStreamResponseV2(response, stream, responseProps);
  }

  // Simple wrapper for dynamic embedder & normalize interface for all LLM implementations
  async embedTextInput(textInput) {
    return await this.embedder.embedTextInput(textInput);
  }
  async embedChunks(textChunks = []) {
    return await this.embedder.embedChunks(textChunks);
  }

  async compressMessages(promptArgs = {}, rawHistory = []) {
    const { messageArrayCompressor } = require("../../helpers/chat");
    const messageArray = this.constructPrompt(promptArgs);
    return await messageArrayCompressor(this, messageArray, rawHistory);
  }
}

/**
 * Parse the base path for the Foundry container API. Since the base path must end in /v1 and cannot have a trailing slash,
 * and the user may set it to anything, likely incorrectly due to pasting behaviors, we need to ensure it is in the correct format.
 * @param {string} basePath
 * @returns {string}
 */
function parseFoundryBasePath(providedBasePath = "") {
  try {
    const baseURL = new URL(providedBasePath);
    const basePath = `${baseURL.origin}/v1`;
    return basePath;
  } catch (e) {
    return providedBasePath;
  }
}

module.exports = {
  FoundryLLM,
  parseFoundryBasePath,
};
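To make the two sizing rules above concrete, a small sketch (illustration only, with hypothetical model names and window sizes) of how `parseFoundryBasePath` and the static `promptWindowLimit` behave:

// parseFoundryBasePath keeps only the URL origin and appends exactly one /v1,
// so pasted paths with extra segments or trailing slashes are normalized:
parseFoundryBasePath("http://127.0.0.1:55776");     // "http://127.0.0.1:55776/v1"
parseFoundryBasePath("http://127.0.0.1:55776/v1/"); // "http://127.0.0.1:55776/v1"
parseFoundryBasePath("not-a-url");                  // returned as-is (new URL() throws)

// promptWindowLimit honors FOUNDRY_MODEL_TOKEN_LIMIT but never lets it exceed
// the window cached from the API. Suppose the API reported 32768 for a model:
// FOUNDRY_MODEL_TOKEN_LIMIT=4096   -> Math.min(4096, 32768)   === 4096
// FOUNDRY_MODEL_TOKEN_LIMIT=999999 -> Math.min(999999, 32768) === 32768
// unset or invalid                 -> 32768 (or the 4096 default if nothing was cached)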
@@ -972,6 +972,8 @@ ${this.getHistory({ to: route.to })
        return new Providers.DellProAiStudioProvider({ model: config.model });
      case "cometapi":
        return new Providers.CometApiProvider({ model: config.model });
      case "foundry":
        return new Providers.FoundryProvider({ model: config.model });
      default:
        throw new Error(
          `Unknown provider: ${config.provider}. Please use a valid provider.`
@@ -18,6 +18,7 @@ const { ChatOllama } = require("@langchain/community/chat_models/ollama");
const { toValidNumber, safeJsonParse } = require("../../../http");
const { getLLMProviderClass } = require("../../../helpers");
const { parseLMStudioBasePath } = require("../../../AiProviders/lmStudio");
const { parseFoundryBasePath } = require("../../../AiProviders/foundry");

const DEFAULT_WORKSPACE_PROMPT =
  "You are a helpful ai assistant who can assist the user and use tools available to help answer the users prompts and questions.";
@@ -193,6 +194,14 @@ class Provider {
          apiKey: process.env.MOONSHOT_AI_API_KEY ?? null,
          ...config,
        });
      case "cometapi":
        return new ChatOpenAI({
          configuration: {
            baseURL: "https://api.cometapi.com/v1",
          },
          apiKey: process.env.COMETAPI_LLM_API_KEY ?? null,
          ...config,
        });
      // OSS Model Runners
      // case "anythingllm_ollama":
      //   return new ChatOllama({
@@ -252,14 +261,15 @@ class Provider {
          apiKey: null,
          ...config,
        });
-     case "cometapi":
+     case "foundry": {
        return new ChatOpenAI({
          configuration: {
-           baseURL: "https://api.cometapi.com/v1",
+           baseURL: parseFoundryBasePath(process.env.FOUNDRY_BASE_PATH),
          },
-         apiKey: process.env.COMETAPI_LLM_API_KEY ?? null,
+         apiKey: null,
          ...config,
        });
+     }

      default:
        throw new Error(`Unsupported provider ${provider} for this task.`);
server/utils/agents/aibitat/providers/foundry.js (new file, 103 lines)
@@ -0,0 +1,103 @@
const OpenAI = require("openai");
const Provider = require("./ai-provider.js");
const InheritMultiple = require("./helpers/classes.js");
const UnTooled = require("./helpers/untooled.js");
const {
  parseFoundryBasePath,
  FoundryLLM,
} = require("../../../AiProviders/foundry/index.js");

/**
 * The agent provider for the Foundry provider.
 * Uses Untooled because Foundry Local does not support tool calling.
 */
class FoundryProvider extends InheritMultiple([Provider, UnTooled]) {
  model;

  constructor(config = {}) {
    const { model = process.env.FOUNDRY_MODEL_PREF } = config;
    super();
    const client = new OpenAI({
      baseURL: parseFoundryBasePath(process.env.FOUNDRY_BASE_PATH),
      apiKey: null,
      maxRetries: 3,
    });

    this._client = client;
    this.model = model;
    this.verbose = true;
  }

  /**
   * Get the client.
   * @returns {OpenAI.OpenAI}
   */
  get client() {
    return this._client;
  }

  get supportsAgentStreaming() {
    return true;
  }

  async #handleFunctionCallChat({ messages = [] }) {
    await FoundryLLM.cacheContextWindows();
    return await this.client.chat.completions
      .create({
        model: this.model,
        messages,
        max_completion_tokens: FoundryLLM.promptWindowLimit(this.model),
      })
      .then((result) => {
        if (!result.hasOwnProperty("choices"))
          throw new Error("Microsoft Foundry Local chat: No results!");
        if (result.choices.length === 0)
          throw new Error("Microsoft Foundry Local chat: No results length!");
        return result.choices[0].message.content;
      })
      .catch((_) => {
        return null;
      });
  }

  async #handleFunctionCallStream({ messages = [] }) {
    await FoundryLLM.cacheContextWindows();
    return await this.client.chat.completions.create({
      model: this.model,
      stream: true,
      messages,
      max_completion_tokens: FoundryLLM.promptWindowLimit(this.model),
    });
  }

  async stream(messages, functions = [], eventHandler = null) {
    return await UnTooled.prototype.stream.call(
      this,
      messages,
      functions,
      this.#handleFunctionCallStream.bind(this),
      eventHandler
    );
  }

  async complete(messages, functions = []) {
    return await UnTooled.prototype.complete.call(
      this,
      messages,
      functions,
      this.#handleFunctionCallChat.bind(this)
    );
  }

  /**
   * Get the cost of the completion.
   *
   * @param _usage The completion to get the cost for.
   * @returns The cost of the completion.
   */
  getCost(_usage) {
    return 0;
  }
}

module.exports = FoundryProvider;
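For orientation (not part of the diff), this is roughly how the agent layer drives the provider once the `Providers` switch earlier in this diff selects it; the message content is hypothetical:

// FoundryProvider routes agent "tool calls" through Untooled prompting, so
// complete() takes plain chat messages plus function definitions:
const provider = new FoundryProvider({ model: "phi-3.5-mini" });
const response = await provider.complete(
  [{ role: "user", content: "Summarize this workspace." }],
  [] // function definitions; Untooled emulates tool calling in-prompt
);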
@@ -25,6 +25,7 @@ const GeminiProvider = require("./gemini.js");
const DellProAiStudioProvider = require("./dellProAiStudio.js");
const MoonshotAiProvider = require("./moonshotAi.js");
const CometApiProvider = require("./cometapi.js");
const FoundryProvider = require("./foundry.js");

module.exports = {
  OpenAIProvider,
@@ -54,4 +55,5 @@ module.exports = {
  GeminiProvider,
  DellProAiStudioProvider,
  MoonshotAiProvider,
  FoundryProvider,
};
@@ -209,6 +209,11 @@ class AgentHandler {
          throw new Error("CometAPI API Key must be provided to use agents.");
        break;

      case "foundry":
        if (!process.env.FOUNDRY_BASE_PATH)
          throw new Error("Foundry base path must be provided to use agents.");
        break;

      default:
        throw new Error(
          "No workspace agent provider set. Please set your agent provider in the workspace's settings"
@@ -281,6 +286,8 @@ class AgentHandler {
        return process.env.DPAIS_LLM_MODEL_PREF;
      case "cometapi":
        return process.env.COMETAPI_LLM_MODEL_PREF ?? "gpt-5-mini";
      case "foundry":
        return process.env.FOUNDRY_MODEL_PREF ?? null;
      default:
        return null;
    }
@@ -9,6 +9,7 @@ const { parseNvidiaNimBasePath } = require("../AiProviders/nvidiaNim");
const { fetchPPIOModels } = require("../AiProviders/ppio");
const { GeminiLLM } = require("../AiProviders/gemini");
const { fetchCometApiModels } = require("../AiProviders/cometapi");
const { parseFoundryBasePath } = require("../AiProviders/foundry");

const SUPPORT_CUSTOM_MODELS = [
  "openai",
@@ -35,6 +36,7 @@ const SUPPORT_CUSTOM_MODELS = [
  "ppio",
  "dpais",
  "moonshotai",
  "foundry",
  // Embedding Engines
  "native-embedder",
];
@@ -92,6 +94,8 @@ async function getCustomModels(provider = "", apiKey = null, basePath = null) {
      return await getDellProAiStudioModels(basePath);
    case "moonshotai":
      return await getMoonshotAiModels(apiKey);
    case "foundry":
      return await getFoundryModels(basePath);
    case "native-embedder":
      return await getNativeEmbedderModels();
    default:
@@ -728,6 +732,33 @@ async function getMoonshotAiModels(_apiKey = null) {
  return { models, error: null };
}

async function getFoundryModels(basePath = null) {
  try {
    const { OpenAI: OpenAIApi } = require("openai");
    const openai = new OpenAIApi({
      baseURL: parseFoundryBasePath(basePath || process.env.FOUNDRY_BASE_PATH),
      apiKey: null,
    });
    const models = await openai.models
      .list()
      .then((results) =>
        results.data.map((model) => ({
          ...model,
          name: model.id,
        }))
      )
      .catch((e) => {
        console.error(`Foundry:listModels`, e.message);
        return [];
      });

    return { models, error: null };
  } catch (e) {
    console.error(`Foundry:getFoundryModels`, e.message);
    return { models: [], error: "Could not fetch Foundry Models" };
  }
}

module.exports = {
  getCustomModels,
  SUPPORT_CUSTOM_MODELS,
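A sketch of the data this returns (illustrative field values only; the `maxInputTokens`/`maxOutputTokens` fields are the ones `cacheContextWindows` sums in the provider above):

// Hypothetical result of getCustomModels("foundry", null, basePath):
// {
//   models: [
//     { id: "phi-3.5-mini", name: "phi-3.5-mini",
//       maxInputTokens: 128000, maxOutputTokens: 4096, ... },
//   ],
//   error: null,
// }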
@@ -215,6 +215,9 @@ function getLLMProvider({ provider = null, model = null } = {}) {
    case "cometapi":
      const { CometApiLLM } = require("../AiProviders/cometapi");
      return new CometApiLLM(embedder, model);
    case "foundry":
      const { FoundryLLM } = require("../AiProviders/foundry");
      return new FoundryLLM(embedder, model);
    default:
      throw new Error(
        `ENV: No valid LLM_PROVIDER value found in environment! Using ${process.env.LLM_PROVIDER}`
@@ -368,6 +371,9 @@ function getLLMProviderClass({ provider = null } = {}) {
    case "cometapi":
      const { CometApiLLM } = require("../AiProviders/cometapi");
      return CometApiLLM;
    case "foundry":
      const { FoundryLLM } = require("../AiProviders/foundry");
      return FoundryLLM;
    default:
      return null;
  }
@@ -438,6 +444,8 @@ function getBaseLLMProviderModel({ provider = null } = {}) {
    case "moonshotai":
      return process.env.MOONSHOT_AI_MODEL_PREF;
    case "cometapi":
      return process.env.COMETAPI_LLM_MODEL_PREF;
    case "foundry":
      return process.env.FOUNDRY_MODEL_PREF;
    default:
      return null;
  }
@@ -705,6 +705,28 @@ const KEY_MAPPING = {
    checks: [isNotEmpty],
  },

  // Foundry Options
  FoundryBasePath: {
    envKey: "FOUNDRY_BASE_PATH",
    checks: [isNotEmpty],
  },
  FoundryModelPref: {
    envKey: "FOUNDRY_MODEL_PREF",
    checks: [isNotEmpty],
    postUpdate: [
      // On new model selection, re-cache the context windows
      async (_, prevValue, __) => {
        const { FoundryLLM } = require("../AiProviders/foundry");
        await FoundryLLM.unloadModelFromEngine(prevValue);
        await FoundryLLM.cacheContextWindows(true);
      },
    ],
  },
  FoundryModelTokenLimit: {
    envKey: "FOUNDRY_MODEL_TOKEN_LIMIT",
    checks: [],
  },

  // CometAPI Options
  CometApiLLMApiKey: {
    envKey: "COMETAPI_LLM_API_KEY",
@@ -828,6 +850,7 @@ function supportedLLM(input = "") {
    "dpais",
    "moonshotai",
    "cometapi",
    "foundry",
  ].includes(input);
  return validSelection ? null : `${input} is not a valid LLM provider.`;
}
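To connect the dots (illustration only): when a user switches `FoundryModelPref`, the `postUpdate` hook above calls `FoundryLLM.unloadModelFromEngine(prevValue)`, which issues a request like the following against a base path of `http://127.0.0.1:55776`:

// GET http://127.0.0.1:55776/openai/unload/phi-3.5-mini?force=true
// (setting baseUrl.pathname replaces the /v1 suffix from parseFoundryBasePath
//  with the unload route, so only the origin of the base path is reused)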