Update download url for qwen models (#90)
ling0322 authored Aug 19, 2024
1 parent b78c02a commit 0673a76
Showing 2 changed files with 54 additions and 16 deletions.
14 changes: 9 additions & 5 deletions README.md
@@ -8,11 +8,15 @@ Welcome to libLLM, an open-source project designed for efficient inference of la

## Model download:

| Model | Download |
|-------------|----------------|
| Index-1.9B-Character (Role-playing) | 🤗[Huggingface](https://huggingface.co/ling0322/bilibili-index-1.9b-libllm/blob/main/bilibili-index-1.9b-character-q4.llmpkg) |
| Index-1.9B-Chat | 🤗[Huggingface](https://huggingface.co/ling0322/bilibili-index-1.9b-libllm/blob/main/bilibili-index-1.9b-chat-q4.llmpkg) |
| Whisper-large-v3 | 🤗[Huggingface](https://huggingface.co/ling0322/whisper-libllm/resolve/main/whisper-large-v3-q4.llmpkg) |
| Model | Download | llm Command |
|-------------|----------------|---------------|
| Index-1.9B-Character (Role-playing) | [🤗[HF](https://huggingface.co/ling0322/bilibili-index-1.9b-libllm/blob/main/bilibili-index-1.9b-character-q4.llmpkg)] | llm chat -m index:character |
| Index-1.9B-Chat | [🤗[HF](https://huggingface.co/ling0322/bilibili-index-1.9b-libllm/blob/main/bilibili-index-1.9b-chat-q4.llmpkg)] | llm chat -m index |
| Qwen2-1.5B-Instruct | [🤗[HF](https://huggingface.co/ling0322/qwen-libllm/blob/main/qwen2-1.5b-instruct-q4.llmpkg)] | llm chat -m qwen:1.5b |
| Qwen2-7B-Instruct | [🤗[HF](https://huggingface.co/ling0322/qwen-libllm/blob/main/qwen2-7b-instruct-q4.llmpkg)] | llm chat -m qwen:7b |
| Whisper-large-v3 | [🤗[HF](https://huggingface.co/ling0322/whisper-libllm/resolve/main/whisper-large-v3-q4.llmpkg)] | llm transcribe -m whisper |

`HF` = HuggingFace

## Recent updates

56 changes: 45 additions & 11 deletions go/bin/download.go
@@ -10,6 +10,7 @@ import (
"net/http"
"os"
"path"
"path/filepath"
"runtime"

"github.com/ling0322/libllm/go/llm"
@@ -20,15 +21,44 @@ var ErrInvalidModelName = errors.New("invalid model name")
var ModelCacheDir = getModelCacheDir()

var modelUrls = map[string]string{
"index-chat": "https://huggingface.co/ling0322/bilibili-index-1.9b-libllm/resolve/main/bilibili-index-1.9b-chat-q4.llmpkg",
"index-character": "https://huggingface.co/ling0322/bilibili-index-1.9b-libllm/resolve/main/bilibili-index-1.9b-character-q4.llmpkg",
"whisper-large-v3": "https://huggingface.co/ling0322/whisper-libllm/resolve/main/whisper-large-v3-q4.llmpkg",
"index:chat:q4": "https://huggingface.co/ling0322/bilibili-index-1.9b-libllm/resolve/main/bilibili-index-1.9b-chat-q4.llmpkg",
"index:character:q4": "https://huggingface.co/ling0322/bilibili-index-1.9b-libllm/resolve/main/bilibili-index-1.9b-character-q4.llmpkg",
"whisper:large-v3:q4": "https://huggingface.co/ling0322/whisper-libllm/resolve/main/whisper-large-v3-q4.llmpkg",
"qwen:7b:q4": "https://huggingface.co/ling0322/qwen-libllm/resolve/main/qwen2-7b-instruct-q4.llmpkg",
"qwen:1.5b:q4": "https://huggingface.co/ling0322/qwen-libllm/resolve/main/qwen2-1.5b-instruct-q4.llmpkg",
}

var modelFilenames = map[string]string{
"index-chat": "bilibili-index-1.9b-chat-q4.llmpkg",
"index-character": "bilibili-index-1.9b-character-q4.llmpkg",
"whisper-large-v3": "whisper-large-v3-q4.llmpkg",
"index:chat:q4": "bilibili-index-1.9b-chat-q4.llmpkg",
"index:character:q4": "bilibili-index-1.9b-character-q4.llmpkg",
"whisper:large-v3:q4": "whisper-large-v3-q4.llmpkg",
"qwen:7b:q4": "qwen2-7b-instruct-q4.llmpkg",
"qwen:1.5b:q4": "qwen2-1.5b-instruct-q4.llmpkg",
}

var defaultModelNames = map[string]string{
"index": "index:chat:q4",
"index:chat": "index:chat:q4",
"index:chat:q4": "index:chat:q4",
"index:character": "index:character:q4",
"index:character:q4": "index:character:q4",
"whisper": "whisper:large-v3:q4",
"whisper:large-v3": "whisper:large-v3:q4",
"whisper:large-v3:q4": "whisper:large-v3:q4",
"qwen": "qwen:7b:q4",
"qwen:7b": "qwen:7b:q4",
"qwen:7b:q4": "qwen:7b:q4",
"qwen:1.5b": "qwen:1.5b:q4",
"qwen:1.5b:q4": "qwen:1.5b:q4",
}

func resolveModelName(name string) (resolvedName string, err error) {
	resolvedName, ok := defaultModelNames[name]
	if !ok {
		return "", fmt.Errorf("unable to resolve model name \"%s\"", name)
	}

	return
}
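
This commit switches model identifiers to a `family:variant:quantization` scheme (for example `qwen:7b:q4`) and adds `defaultModelNames` so that shorthand aliases such as `qwen` or `qwen:1.5b` expand to a fully qualified name before the filename and URL lookups. Below is a minimal standalone sketch of that resolution chain, using a subset of the tables from this diff; the `resolve` helper and the `main` driver are illustrative only and not code from the commit.

```go
package main

import (
	"errors"
	"fmt"
)

// Subset of the alias table introduced in download.go: shorthand -> fully qualified name.
var defaultModelNames = map[string]string{
	"qwen":         "qwen:7b:q4",
	"qwen:7b":      "qwen:7b:q4",
	"qwen:7b:q4":   "qwen:7b:q4",
	"qwen:1.5b":    "qwen:1.5b:q4",
	"qwen:1.5b:q4": "qwen:1.5b:q4",
}

// Subset of the URL table: fully qualified name -> .llmpkg download URL.
var modelUrls = map[string]string{
	"qwen:7b:q4":   "https://huggingface.co/ling0322/qwen-libllm/resolve/main/qwen2-7b-instruct-q4.llmpkg",
	"qwen:1.5b:q4": "https://huggingface.co/ling0322/qwen-libllm/resolve/main/qwen2-1.5b-instruct-q4.llmpkg",
}

var errInvalidModelName = errors.New("invalid model name")

// resolve expands a shorthand model name and looks up its download URL,
// mirroring resolveModelName followed by the modelUrls lookup.
func resolve(name string) (resolved, url string, err error) {
	resolved, ok := defaultModelNames[name]
	if !ok {
		return "", "", fmt.Errorf("%w: %q", errInvalidModelName, name)
	}
	url, ok = modelUrls[resolved]
	if !ok {
		return "", "", fmt.Errorf("%w: no url for %q", errInvalidModelName, resolved)
	}
	return resolved, url, nil
}

func main() {
	for _, name := range []string{"qwen", "qwen:1.5b", "llama3"} {
		resolved, url, err := resolve(name)
		if err != nil {
			fmt.Println(name, "->", err)
			continue
		}
		fmt.Println(name, "->", resolved, "->", url)
	}
}
```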

func getModelCacheDir() string {
@@ -90,7 +120,7 @@ func downloadModel(name string) (modelPath string, err error) {

	bar := progressbar.DefaultBytes(
		resp.ContentLength,
		"Downloading",
		filename,
	)
	_, err = io.Copy(io.MultiWriter(f, bar), resp.Body)
	if err != nil {
@@ -124,6 +154,11 @@ func checkModelInCache(name string) (modelPath string, err error) {
}

func getOrDownloadModel(name string) (modelPath string, err error) {
	name, err = resolveModelName(name)
	if err != nil {
		return
	}

	modelPath, err = checkModelInCache(name)
	if err == nil {
		return
@@ -136,11 +171,10 @@ func createModelAutoDownload(nameOrPath string, device llm.Device) (llm.Model, e
	var modelPath string
	var err error

	_, ok := modelFilenames[nameOrPath]
	if ok {
		modelPath, err = getOrDownloadModel(nameOrPath)
	} else {
	if filepath.Ext(nameOrPath) == ".llmpkg" {
		modelPath = nameOrPath
	} else {
		modelPath, err = getOrDownloadModel(nameOrPath)
	}

	if err != nil {
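`createModelAutoDownload` also changes how it decides between a local package and a downloadable model: instead of checking membership in `modelFilenames`, it now treats any argument ending in `.llmpkg` as a local file path and resolves everything else as a model name. Below is a rough sketch of that dispatch; `getOrDownloadModel` is a stub standing in for the real downloader, and the cache path it returns is an assumption made for the sketch.

```go
package main

import (
	"fmt"
	"path/filepath"
)

// getOrDownloadModel is a stand-in for the real resolver/downloader in download.go;
// the returned cache path is an assumption made for this sketch.
func getOrDownloadModel(name string) (string, error) {
	return filepath.Join("model-cache", name+".llmpkg"), nil
}

// pickModelPath mirrors the new branching in createModelAutoDownload:
// a *.llmpkg argument is used as-is, anything else is resolved and downloaded.
func pickModelPath(nameOrPath string) (string, error) {
	if filepath.Ext(nameOrPath) == ".llmpkg" {
		return nameOrPath, nil
	}
	return getOrDownloadModel(nameOrPath)
}

func main() {
	for _, arg := range []string{"./my-model-q4.llmpkg", "qwen:1.5b"} {
		path, err := pickModelPath(arg)
		fmt.Println(arg, "->", path, err)
	}
}
```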
