WIP quickstart env

This commit is contained in:
Timothy Carambat
2026-01-28 14:58:54 -08:00
parent 88459ce2d2
commit 255e0985c8
4 changed files with 128 additions and 3 deletions

View File

@@ -205,9 +205,11 @@ const SystemSettings = {
},
currentSettings: async function () {
const { hasVectorCachedFiles } = require("../utils/files");
const llmProvider = process.env.LLM_PROVIDER;
const vectorDB = process.env.VECTOR_DB;
const embeddingEngine = process.env.EMBEDDING_ENGINE ?? "native";
const { getStartupDefaults } = require("../utils/boot/defaults");
const defaults = getStartupDefaults();
const llmProvider = process.env.LLM_PROVIDER ?? defaults.llmProvider;
const vectorDB = process.env.VECTOR_DB ?? defaults.vectorDB;
const embeddingEngine = process.env.EMBEDDING_ENGINE ?? defaults.embeddingEngine;
return {
// --------------------------------------------------------
// General Settings

View File

@@ -0,0 +1,48 @@
const { supportedLLM } = require("../helpers/updateENV");
const { getProcessEnvValue } = require("../helpers");
const LLM_PROVIDER_DEFAULT = null;
const VECTOR_DB_DEFAULT = null;
const EMBEDDING_ENGINE_DEFAULT = "native";
/**
 * Resolves the startup default settings for the server.
 * Runs LLM quick-setup validation first (which may populate
 * process.env.LLM_PROVIDER as a side effect), then returns each setting
 * from the environment, falling back to the module-level default when the
 * env var is null/undefined.
 * @returns {{llmProvider: string|null, vectorDB: string|null, embeddingEngine: string}}
 */
function getStartupDefaults() {
  validateLLMQuickSetup();
  const env = process.env;
  return {
    llmProvider: env.LLM_PROVIDER ?? LLM_PROVIDER_DEFAULT,
    vectorDB: env.VECTOR_DB ?? VECTOR_DB_DEFAULT,
    embeddingEngine: env.EMBEDDING_ENGINE ?? EMBEDDING_ENGINE_DEFAULT,
  };
}
/**
 * Validates and applies the LLM_QUICKSETUP env var ("provider:model_tag"),
 * seeding process.env.LLM_PROVIDER and the provider's model-pref env key.
 * No-op when LLM_PROVIDER is already set or LLM_QUICKSETUP is absent.
 * Errors are logged, never thrown; LLM_QUICKSETUP is always cleared from
 * the environment afterwards (it is single-use).
 */
function validateLLMQuickSetup() {
  try {
    if (process.env.LLM_PROVIDER) return;
    if (!process.env.LLM_QUICKSETUP) return;

    const quickSetupString = process.env.LLM_QUICKSETUP;
    const separatorIndex = quickSetupString.indexOf(":");
    // BUGFIX: a string with no ":" must be rejected here. Previously
    // indexOf returned -1 and slice(0, -1)/slice(0) silently produced a
    // truncated provider name and a bogus model, passing the truthiness
    // check below (e.g. "dmr" -> provider "dm", model "dmr").
    if (separatorIndex === -1)
      throw new Error("Invalid LLM quick setup string. Format: provider:model_tag. eg: dmr:ai/qwen3:4B-UD-Q4_K_XL");

    let provider = quickSetupString.slice(0, separatorIndex);
    const model = quickSetupString.slice(separatorIndex + 1);
    if (!provider || !model)
      throw new Error("Invalid LLM quick setup string. Format: provider:model_tag. eg: dmr:ai/qwen3:4B-UD-Q4_K_XL");

    // Aliases for quick setup
    switch (provider) {
      case "dmr":
        provider = "docker-model-runner";
        break;
      default:
        break;
    }

    // NOTE(review): a non-null return from supportedLLM is treated as
    // "unsupported" here — confirm against updateENV's validator contract.
    if (supportedLLM(provider) !== null)
      throw new Error(`Invalid LLM provider: ${provider}`);

    process.env.LLM_PROVIDER = provider;
    // Map the provider to its model-pref env key so the chosen model tag
    // is picked up by the normal provider bootstrapping.
    const expectedModelPrefKey = getProcessEnvValue(provider);
    if (model && expectedModelPrefKey) process.env[expectedModelPrefKey] = model;
  } catch (error) {
    console.error("Error validating LLM quick setup: " + error.message);
  } finally {
    if (process.env.LLM_QUICKSETUP) delete process.env.LLM_QUICKSETUP;
  }
}
module.exports = {
getStartupDefaults,
}

View File

@@ -487,6 +487,79 @@ function getBaseLLMProviderModel({ provider = null } = {}) {
}
}
// Maps an LLM provider identifier to the process.env key that stores its
// selected model. Built on a null prototype so inherited Object.prototype
// keys (e.g. "constructor") can never be mistaken for a provider entry.
// Providers with no model-pref env key (huggingface, textgenwebui) are
// intentionally absent and resolve to the caller's defaultValue.
const MODEL_PREF_ENV_KEYS = Object.freeze(
  Object.assign(Object.create(null), {
    openai: "OPEN_MODEL_PREF",
    azure: "OPEN_MODEL_PREF",
    anthropic: "ANTHROPIC_MODEL_PREF",
    gemini: "GEMINI_LLM_MODEL_PREF",
    lmstudio: "LMSTUDIO_MODEL_PREF",
    localai: "LOCAL_AI_MODEL_PREF",
    ollama: "OLLAMA_MODEL_PREF",
    togetherai: "TOGETHER_AI_MODEL_PREF",
    fireworksai: "FIREWORKS_AI_LLM_MODEL_PREF",
    perplexity: "PERPLEXITY_MODEL_PREF",
    openrouter: "OPENROUTER_MODEL_PREF",
    mistral: "MISTRAL_MODEL_PREF",
    groq: "GROQ_MODEL_PREF",
    koboldcpp: "KOBOLD_CPP_MODEL_PREF",
    cohere: "COHERE_MODEL_PREF",
    litellm: "LITE_LLM_MODEL_PREF",
    "generic-openai": "GENERIC_OPEN_AI_MODEL_PREF",
    bedrock: "AWS_BEDROCK_LLM_MODEL_PREFERENCE",
    deepseek: "DEEPSEEK_MODEL_PREF",
    apipie: "APIPIE_LLM_MODEL_PREF",
    novita: "NOVITA_LLM_MODEL_PREF",
    xai: "XAI_LLM_MODEL_PREF",
    "nvidia-nim": "NVIDIA_NIM_LLM_MODEL_PREF",
    ppio: "PPIO_MODEL_PREF",
    dpais: "DPAIS_LLM_MODEL_PREF",
    moonshotai: "MOONSHOT_AI_MODEL_PREF",
    cometapi: "COMETAPI_LLM_MODEL_PREF",
    foundry: "FOUNDRY_MODEL_PREF",
    zai: "ZAI_MODEL_PREF",
    giteeai: "GITEE_AI_MODEL_PREF",
    "docker-model-runner": "DOCKER_MODEL_RUNNER_LLM_MODEL_PREF",
  })
);

/**
 * Returns the process.env key name holding the selected model for a given
 * LLM provider, or defaultValue when the provider is unknown or has no
 * model-pref env key.
 * @param {string} provider - LLM provider identifier (e.g. "ollama").
 * @param {string|null} [defaultValue=null] - Fallback for unmapped providers.
 * @returns {string|null} The env key name, or defaultValue.
 */
function getProcessEnvValue(provider, defaultValue = null) {
  // ?? (not ||) so only a genuinely missing mapping falls back; all mapped
  // values are non-empty strings, so this is equivalent to the old switch.
  return MODEL_PREF_ENV_KEYS[provider] ?? defaultValue;
}
// Some models have lower restrictions on chars that can be encoded in a single pass
// and by default we assume it can handle 1,000 chars, but some models use work with smaller
// chars so here we can override that value when embedding information.
@@ -537,6 +610,7 @@ module.exports = {
getVectorDbClass,
getLLMProviderClass,
getBaseLLMProviderModel,
getProcessEnvValue,
getLLMProvider,
toChunks,
humanFileSize,

View File

@@ -1302,4 +1302,5 @@ function dumpENV() {
module.exports = {
dumpENV,
updateENV,
supportedLLM,
};