diff --git a/extensions/assistant-extension/package.json b/extensions/assistant-extension/package.json
index a767632442..094f9820cb 100644
--- a/extensions/assistant-extension/package.json
+++ b/extensions/assistant-extension/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/assistant-extension",
-  "productName": "Jan Assistant Extension",
+  "productName": "Jan Assistant",
   "version": "1.0.1",
   "description": "This extension enables assistants, including Jan, a default assistant that can call all downloaded models",
   "main": "dist/index.js",
diff --git a/extensions/conversational-extension/package.json b/extensions/conversational-extension/package.json
index 712a9883ce..a29967da46 100644
--- a/extensions/conversational-extension/package.json
+++ b/extensions/conversational-extension/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/conversational-extension",
-  "productName": "Conversational Extension",
+  "productName": "Conversational",
   "version": "1.0.0",
   "description": "This extension enables conversations and state persistence via your filesystem",
   "main": "dist/index.js",
diff --git a/extensions/huggingface-extension/package.json b/extensions/huggingface-extension/package.json
index 234b806d84..c0c18c5ebc 100644
--- a/extensions/huggingface-extension/package.json
+++ b/extensions/huggingface-extension/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/huggingface-extension",
-  "productName": "HuggingFace Extension",
+  "productName": "HuggingFace",
   "version": "1.0.0",
   "description": "Hugging Face extension for converting HF models to GGUF",
   "main": "dist/index.js",
diff --git a/extensions/inference-groq-extension/package.json b/extensions/inference-groq-extension/package.json
index 8d70d1d9fe..faf1b4a98b 100644
--- a/extensions/inference-groq-extension/package.json
+++ b/extensions/inference-groq-extension/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/inference-groq-extension",
-  "productName": "Groq Inference Engine Extension",
+  "productName": "Groq Inference Engine",
   "version": "1.0.0",
   "description": "This extension enables fast Groq chat completion API calls",
   "main": "dist/index.js",
diff --git a/extensions/inference-mistral-extension/package.json b/extensions/inference-mistral-extension/package.json
index c1de1f959c..86fa8bc77d 100644
--- a/extensions/inference-mistral-extension/package.json
+++ b/extensions/inference-mistral-extension/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/inference-mistral-extension",
-  "productName": "Mistral AI Inference Engine Extension",
+  "productName": "MistralAI Inference Engine",
   "version": "1.0.0",
   "description": "This extension enables Mistral chat completion API calls",
   "main": "dist/index.js",
diff --git a/extensions/inference-nitro-extension/package.json b/extensions/inference-nitro-extension/package.json
index 78558b1c39..1a0b5c0d08 100644
--- a/extensions/inference-nitro-extension/package.json
+++ b/extensions/inference-nitro-extension/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/inference-nitro-extension",
-  "productName": "Nitro Inference Engine Extension",
+  "productName": "Nitro Inference Engine",
   "version": "1.0.0",
   "description": "This extension embeds Nitro, a lightweight (3mb) inference engine written in C++. See https://nitro.jan.ai.\nUse this setting if you encounter errors related to **CUDA toolkit** during application execution.",
   "main": "dist/index.js",
diff --git a/extensions/inference-openai-extension/package.json b/extensions/inference-openai-extension/package.json
index 3435a53462..2dd75d300e 100644
--- a/extensions/inference-openai-extension/package.json
+++ b/extensions/inference-openai-extension/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/inference-openai-extension",
-  "productName": "OpenAI Inference Engine Extension",
+  "productName": "OpenAI Inference Engine",
   "version": "1.0.0",
   "description": "This extension enables OpenAI chat completion API calls",
   "main": "dist/index.js",
diff --git a/extensions/inference-triton-trtllm-extension/package.json b/extensions/inference-triton-trtllm-extension/package.json
index 9ce8f11a91..06c4976e1a 100644
--- a/extensions/inference-triton-trtllm-extension/package.json
+++ b/extensions/inference-triton-trtllm-extension/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/inference-triton-trt-llm-extension",
-  "productName": "Triton-TRT-LLM Inference Engine Extension",
+  "productName": "Triton-TRT-LLM Inference Engine",
   "version": "1.0.0",
   "description": "This extension enables Nvidia's TensorRT-LLM as an inference engine option",
   "main": "dist/index.js",
diff --git a/extensions/model-extension/package.json b/extensions/model-extension/package.json
index c6b6593224..0967e16324 100644
--- a/extensions/model-extension/package.json
+++ b/extensions/model-extension/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/model-extension",
-  "productName": "Model Management Extension",
+  "productName": "Model Management",
   "version": "1.0.30",
   "description": "Model Management Extension provides model exploration and seamless downloads",
   "main": "dist/index.js",
diff --git a/extensions/monitoring-extension/package.json b/extensions/monitoring-extension/package.json
index c320db2ba2..e728b46291 100644
--- a/extensions/monitoring-extension/package.json
+++ b/extensions/monitoring-extension/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/monitoring-extension",
-  "productName": "System Monitoring Extension",
+  "productName": "System Monitoring",
   "version": "1.0.10",
   "description": "This extension provides system health and OS level data",
   "main": "dist/index.js",
diff --git a/extensions/monitoring-extension/src/node/index.ts b/extensions/monitoring-extension/src/node/index.ts
index bb0c4ac182..3f1be56098 100644
--- a/extensions/monitoring-extension/src/node/index.ts
+++ b/extensions/monitoring-extension/src/node/index.ts
@@ -49,7 +49,9 @@ const DEFAULT_SETTINGS: GpuSetting = {
 
 export const getGpuConfig = async (): Promise<GpuSetting | undefined> => {
   if (process.platform === 'darwin') return undefined
-  return JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
+  if (existsSync(GPU_INFO_FILE))
+    return JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
+  return DEFAULT_SETTINGS
 }
 
 export const getResourcesInfo = async (): Promise<ResourceInfo> => {
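The `getGpuConfig` hunk above changes failure behavior: previously an unconditional `readFileSync(GPU_INFO_FILE, 'utf-8')` would throw `ENOENT` whenever the GPU info file had not been written yet (for example on a fresh install), whereas now a missing file falls back to `DEFAULT_SETTINGS`. A minimal self-contained sketch of the new behavior; the `GpuSetting` shape, the file path, and the default values here are abbreviated stand-ins, not the extension's real definitions:

```ts
import { existsSync, readFileSync } from 'fs'
import { join } from 'path'

// Abbreviated stand-in for the extension's GpuSetting type.
interface GpuSetting {
  run_mode: 'cpu' | 'gpu'
  gpus_in_use: string[]
}

// Hypothetical location; the real GPU_INFO_FILE lives in Jan's data folder.
const GPU_INFO_FILE = join(process.cwd(), 'settings', 'gpu.json')

const DEFAULT_SETTINGS: GpuSetting = { run_mode: 'cpu', gpus_in_use: [] }

export const getGpuConfig = async (): Promise<GpuSetting | undefined> => {
  // macOS has no NVIDIA GPU probing, so no config applies there.
  if (process.platform === 'darwin') return undefined
  // Before the patch: unconditional readFileSync, which throws if the file
  // is missing. After: return the defaults instead of crashing.
  if (existsSync(GPU_INFO_FILE))
    return JSON.parse(readFileSync(GPU_INFO_FILE, 'utf-8'))
  return DEFAULT_SETTINGS
}
```

Note that `existsSync` must already be imported in the real module for the hunk to compile (the import block is outside the hunk), and the exists-then-read pair leaves a small race window; wrapping the read in try/catch with the same fallback would close it.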
diff --git a/extensions/tensorrt-llm-extension/package.json b/extensions/tensorrt-llm-extension/package.json
index 02b0b4e8c6..c8eafb10d2 100644
--- a/extensions/tensorrt-llm-extension/package.json
+++ b/extensions/tensorrt-llm-extension/package.json
@@ -1,6 +1,6 @@
 {
   "name": "@janhq/tensorrt-llm-extension",
-  "productName": "TensorRT-LLM Inference Engine Extension",
+  "productName": "TensorRT-LLM Inference Engine",
   "version": "0.0.3",
   "description": "This extension enables Nvidia's TensorRT-LLM for the fastest GPU acceleration. See the [setup guide](https://jan.ai/guides/providers/tensorrt-llm/) for next steps.",
   "main": "dist/index.js",
diff --git a/web/hooks/useActiveModel.ts b/web/hooks/useActiveModel.ts
index 34ffd1af73..1e648f60ee 100644
--- a/web/hooks/useActiveModel.ts
+++ b/web/hooks/useActiveModel.ts
@@ -126,33 +126,27 @@ export function useActiveModel() {
     })
   }
 
-  const stopModel = useCallback(
-    async (model?: Model) => {
-      const stoppingModel = activeModel || model
-      if (
-        !stoppingModel ||
-        (!model && stateModel.state === 'stop' && stateModel.loading)
-      )
-        return
-
-      setStateModel({ state: 'stop', loading: true, model: stoppingModel })
-      const engine = EngineManager.instance().get(stoppingModel.engine)
-      return engine
-        ?.unloadModel(stoppingModel)
-        .catch()
-        .then(() => {
-          setActiveModel(undefined)
-          setStateModel({ state: 'start', loading: false, model: undefined })
-          loadModelController?.abort()
-        })
-    },
-    [activeModel, setActiveModel, setStateModel, stateModel]
-  )
+  const stopModel = useCallback(async () => {
+    const stoppingModel = activeModel || stateModel.model
+    if (!stoppingModel || (stateModel.state === 'stop' && stateModel.loading))
+      return
+
+    setStateModel({ state: 'stop', loading: true, model: stoppingModel })
+    const engine = EngineManager.instance().get(stoppingModel.engine)
+    return engine
+      ?.unloadModel(stoppingModel)
+      .catch()
+      .then(() => {
+        setActiveModel(undefined)
+        setStateModel({ state: 'start', loading: false, model: undefined })
+        loadModelController?.abort()
+      })
+  }, [activeModel, setActiveModel, setStateModel, stateModel])
 
   const stopInference = useCallback(async () => {
     // Loading model
     if (stateModel.loading) {
-      stopModel(stateModel.model)
+      stopModel()
       return
     }
     if (!activeModel) return
diff --git a/web/hooks/useFactoryReset.ts b/web/hooks/useFactoryReset.ts
index 878461ef12..8364ca10d9 100644
--- a/web/hooks/useFactoryReset.ts
+++ b/web/hooks/useFactoryReset.ts
@@ -19,7 +19,7 @@ export const factoryResetStateAtom = atom(FactoryResetState.Idle)
 
 export default function useFactoryReset() {
   const defaultJanDataFolder = useAtomValue(defaultJanDataFolderAtom)
-  const { activeModel, stopModel } = useActiveModel()
+  const { stopModel } = useActiveModel()
   const setFactoryResetState = useSetAtom(factoryResetStateAtom)
 
   const resetAll = useCallback(
@@ -44,11 +44,9 @@
         await window.core?.api?.updateAppConfiguration(configuration)
       }
 
-      if (activeModel) {
-        setFactoryResetState(FactoryResetState.StoppingModel)
-        await stopModel()
-        await new Promise((resolve) => setTimeout(resolve, 4000))
-      }
+      setFactoryResetState(FactoryResetState.StoppingModel)
+      await stopModel()
+      await new Promise((resolve) => setTimeout(resolve, 4000))
 
       setFactoryResetState(FactoryResetState.DeletingData)
       await fs.rm(janDataFolderPath)
@@ -59,7 +57,7 @@
 
       await window.core?.api?.relaunch()
     },
-    [defaultJanDataFolder, activeModel, stopModel, setFactoryResetState]
+    [defaultJanDataFolder, stopModel, setFactoryResetState]
   )
 
   return {
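The `useActiveModel` refactor removes `stopModel`'s optional `model` parameter: the hook now resolves the model to stop from `activeModel` or `stateModel.model`, and the early-return guard no longer depends on whether a caller passed an argument. That is what lets `stopInference` call `stopModel()` bare, and lets `useFactoryReset` drop both its `activeModel` check and the `activeModel` dependency. A simplified sketch of the new guard semantics, with abbreviated stand-ins for the `Model` type and the state atom shape:

```ts
// Stand-ins for the real types managed by the hook's atoms.
type Model = { id: string; engine: string }
type ModelState = { state: 'start' | 'stop'; loading: boolean; model?: Model }

// Returns the model a bare stopModel() call would act on, or undefined if
// the call should be a no-op (nothing loaded, or a stop already in flight).
function resolveStoppingModel(
  activeModel: Model | undefined,
  stateModel: ModelState
): Model | undefined {
  const stoppingModel = activeModel ?? stateModel.model
  if (!stoppingModel) return undefined
  if (stateModel.state === 'stop' && stateModel.loading) return undefined // double-stop guard
  return stoppingModel
}
```

Because the guard is now self-contained, `useFactoryReset` can invoke `stopModel()` unconditionally: if no model is running the call returns early, and the 4-second grace period simply delays the `DeletingData` step while any in-flight unload completes.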