[Cherry Pick] Head Repo & Additional Improvement for Model Vision (#279)
* Add vision support (ChatGPTNextWeb#4076)

* Refactor [UI/UX] [Front End] [Chat] Remove Duplicate "onUserInput"

- [+] refactor(chat.tsx): remove duplicate onUserInput call and localStorage.setItem in _Chat function

* Feat [UI/UX] [Chat List] Search Support for Multimodal Content

- [+] feat(chat-list.tsx): add search support for array of MultimodalContent in ChatList component

* Style [UI/UX] [Chat List] Linting

- [+] style(chat-list.tsx): improve readability by breaking down lengthy if condition into multiple lines

* Add Back Text Moderation

- [+] feat(openai.ts): add support for text moderation in chat method

* Feat [LLM APIs] [Google] InlineData

- [+] feat(google.ts): add InlineData to MessagePart, refactor message construction
- [+] chore(google.ts): add comments for clarity

* Style [LLM APIs] [Google] InlineData

- [+] style(google.ts): update comment for InlineData interface

* Todo [LLM APIs] [Google] InlineData

- [+] todo(google.ts): add TODO comment to improve safety settings configuration

* Todo [UI/UX] [Front End] [Chat] Summarize

- [+] chore(chat.ts): add TODO comment to improve the summary for gemini-pro-vision

---------

Co-authored-by: TheRamU <[email protected]>
H0llyW00dzZ and TheRamU authored Feb 20, 2024
1 parent fc51375 commit 49fa2d0
Showing 17 changed files with 681 additions and 93 deletions.
10 changes: 9 additions & 1 deletion app/client/api.ts
@@ -14,9 +14,17 @@ export type MessageRole = (typeof ROLES)[number];
 export const Models = ["gpt-3.5-turbo", "gpt-4"] as const;
 export type ChatModel = ModelType;
 
+export interface MultimodalContent {
+  type: "text" | "image_url";
+  text?: string;
+  image_url?: {
+    url: string;
+  };
+}
+
 export interface RequestMessage {
   role: MessageRole;
-  content: string;
+  content: string | MultimodalContent[];
 }
 
 export interface LLMConfig {
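With this change, a RequestMessage can carry either a plain string or an array of typed parts. Purely for illustration, a vision-style message under the new union type might look like this (values made up):

const visionMessage: RequestMessage = {
  role: "user",
  content: [
    { type: "text", text: "What is in this picture?" },
    {
      type: "image_url",
      // Either an https URL or a base64 data URL fits the declared shape.
      image_url: { url: "data:image/png;base64,iVBORw0KGgo..." },
    },
  ],
};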
88 changes: 71 additions & 17 deletions app/client/platforms/google.ts
@@ -10,7 +10,12 @@ import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 import { getClientConfig } from "@/app/config/client";
 import Locale from "../../locales";
 import { getServerSideConfig } from "@/app/config/server";
-import { getProviderFromState } from "@/app/utils";
+import {
+  getProviderFromState,
+  getMessageTextContent,
+  getMessageImages,
+  isVisionModel,
+} from "@/app/utils";
 import { getNewStuff } from './NewStuffLLMs';
 
@@ -35,7 +40,8 @@ interface GoogleResponse {
  * Represents a part of a message, typically containing text.
  */
 interface MessagePart {
-  text: string;
+  text?: string;
+  inline_data?: InlineData;
 }
 
 /**
@@ -46,6 +52,12 @@ interface Message {
   parts: MessagePart[];
 }
 
+// Explicit interface so the payload stays typed and maintainable, rather than falling back to "any".
+interface InlineData {
+  mime_type: string;
+  data: string;
+}
+
 /**
  * Configuration for the AI model used within the chat method.
  */
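This mirrors the inline data part of Google's generateContent request, where data carries the raw base64 payload without the data: URL prefix. An illustrative value (made up):

const imagePart: MessagePart = {
  inline_data: {
    mime_type: "image/png",
    // base64 only; the "data:image/png;base64," prefix is stripped before sending
    data: "iVBORw0KGgoAAAANSUhEUg...",
  },
};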
@@ -104,10 +116,35 @@ export class GeminiProApi implements LLMApi {
     const provider = getProviderFromState();
     const cfgspeed_animation = useAppConfig.getState().speed_animation; // Get the animation speed from the app config
     // const apiClient = this;
-    const messages: Message[] = options.messages.map((v) => ({
-      role: v.role.replace("assistant", "model").replace("system", "user"),
-      parts: [{ text: v.content }],
-    }));
+    const visionModel = isVisionModel(options.config.model);
+    let multimodal = false;
+
+    // Construct messages with the correct types
+    const messages: Message[] = options.messages.map((v) => {
+      let parts: MessagePart[] = [{ text: getMessageTextContent(v) }];
+      if (visionModel) {
+        const images = getMessageImages(v);
+        if (images.length > 0) {
+          multimodal = true;
+          parts = parts.concat(
+            images.map((image) => {
+              // Split the data URL "data:<mime_type>;base64,<data>" into its parts
+              const imageType = image.split(";")[0].split(":")[1];
+              const imageData = image.split(",")[1];
+              return {
+                inline_data: {
+                  mime_type: imageType,
+                  data: imageData,
+                },
+              };
+            }),
+          );
+        }
+      }
+      return {
+        role: v.role.replace("assistant", "model").replace("system", "user"),
+        parts: parts,
+      };
+    });
 
     // google requires that role in neighboring messages must not be the same
     for (let i = 0; i < messages.length - 1;) {
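The helpers getMessageTextContent, getMessageImages, and isVisionModel are imported from @/app/utils and are not part of this diff. Based on how they are called here, a minimal sketch of what they plausibly look like (an assumption, not the committed implementation; the import path is likewise assumed):

import { RequestMessage } from "./client/api"; // path assumed

// Return string content directly, or the first text part of a multimodal message.
export function getMessageTextContent(message: RequestMessage): string {
  if (typeof message.content === "string") return message.content;
  for (const part of message.content) {
    if (part.type === "text") return part.text ?? "";
  }
  return "";
}

// Collect every attached image URL (typically base64 data URLs).
export function getMessageImages(message: RequestMessage): string[] {
  if (typeof message.content === "string") return [];
  return message.content
    .filter((part) => part.type === "image_url" && part.image_url)
    .map((part) => part.image_url!.url);
}

// Assumed to be a simple keyword check on the model name,
// e.g. "gpt-4-vision-preview" or "gemini-pro-vision".
export function isVisionModel(model: string): boolean {
  return model.includes("vision");
}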
@@ -118,8 +155,6 @@
         i++;
       }
     }
-
-    const appConfig = useAppConfig.getState().modelConfig;
     const chatConfig = useChatStore.getState().currentSession().mask.modelConfig;
 
     // Call getNewStuff to determine the max_tokens and other configurations
@@ -130,11 +165,15 @@
       chatConfig.useMaxTokens,
     );
 
-    const modelConfig: ModelConfig = {
-      ...appConfig,
-      ...chatConfig,
-      // Use max_tokens from getNewStuff if defined, otherwise use the existing value
-      max_tokens: max_tokens !== undefined ? max_tokens : options.config.max_tokens,
+    // if (visionModel && messages.length > 1) {
+    //   options.onError?.(new Error("Multiturn chat is not enabled for models/gemini-pro-vision"));
+    // }
+    const modelConfig = {
+      ...useAppConfig.getState().modelConfig,
+      ...useChatStore.getState().currentSession().mask.modelConfig,
+      ...{
+        model: options.config.model,
+      },
     };
 
     const requestPayload = {
@@ -148,6 +187,7 @@
         topP: modelConfig.top_p,
         // "topK": modelConfig.top_k,
       },
+      // TODO: Improve safety settings to make them configurable, similar to the rich terminal interface chat feature in GoGenAI, which is written in Go.
       safetySettings: [
         {
           category: "HARM_CATEGORY_HARASSMENT",
@@ -177,15 +217,16 @@
     const controller = new AbortController();
     options.onController?.(controller);
     try {
-      // Note: With this refactoring, it's now possible to use `v1`, `v1beta` in the settings.
-      // However, this is just temporary and might need to be changed in the future.
-      let chatPath = this.path(accessStore.googleApiVersion + Google.ChatPath);
+      let googleChatPath = visionModel
+        ? Google.VisionChatPath
+        : Google.ChatPath;
+      let chatPath = this.path(googleChatPath);
 
       // let baseUrl = accessStore.googleUrl;
 
       if (!baseUrl) {
         baseUrl = isApp
-          ? DEFAULT_API_HOST + "/api/proxy/google/" + accessStore.googleApiVersion + Google.ChatPath
+          ? DEFAULT_API_HOST + "/api/proxy/google/" + googleChatPath
           : chatPath;
       }
@@ -252,6 +293,19 @@
               value,
             }): Promise<any> {
               if (done) {
+                if (response.status !== 200) {
+                  try {
+                    let data = JSON.parse(ensureProperEnding(partialData));
+                    if (data && data[0].error) {
+                      options.onError?.(new Error(data[0].error.message));
+                    } else {
+                      options.onError?.(new Error("Request failed"));
+                    }
+                  } catch (_) {
+                    options.onError?.(new Error("Request failed"));
+                  }
+                }
+
                 console.log("[Streaming] Stream complete");
                 // options.onFinish(responseText + remainText);
                 finished = true;
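ensureProperEnding, used in the error branch above, is defined elsewhere in this file and not shown in the diff. Gemini's streaming endpoint returns a single JSON array spread across chunks, so a partially received buffer may be missing the closing bracket; a plausible sketch of the idea (assumed, not the committed implementation):

// Close a partially received JSON array so JSON.parse can succeed.
function ensureProperEnding(chunk: string): string {
  if (chunk.startsWith("[") && !chunk.endsWith("]")) {
    return chunk + "]";
  }
  return chunk;
}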
44 changes: 35 additions & 9 deletions app/client/platforms/openai.ts
@@ -9,7 +9,14 @@ import {
 } from "@/app/constant";
 import { useAccessStore, useAppConfig, useChatStore } from "@/app/store";
 
-import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api";
+import {
+  ChatOptions,
+  getHeaders,
+  LLMApi,
+  LLMModel,
+  LLMUsage,
+  MultimodalContent,
+} from "../api";
 import Locale from "../../locales";
 import {
   EventStreamContentType,
@@ -24,6 +31,11 @@ import { prettyObject } from "@/app/utils/format";
 import { getClientConfig } from "@/app/config/client";
 import { getProviderFromState } from "@/app/utils";
 import { makeAzurePath } from "@/app/azure";
+import {
+  getMessageTextContent,
+  getMessageImages,
+  isVisionModel,
+} from "@/app/utils";
 
 export interface OpenAIListModelResponse {
   object: string;
@@ -88,6 +100,7 @@ export class ChatGPTApi implements LLMApi {
    *
    */
   async chat(options: ChatOptions) {
+    const visionModel = isVisionModel(options.config.model);
     /**
      * The text moderation configuration.
      * @remarks
@@ -97,24 +110,37 @@
     const textmoderation = useAppConfig.getState().textmoderation;
     const checkprovider = getProviderFromState();
     const userMessageS = options.messages.filter((msg) => msg.role === "user");
-    const lastUserMessage = userMessageS[userMessageS.length - 1]?.content;
+    const lastUserMessageContent = userMessageS[userMessageS.length - 1]?.content;
+    let textToModerate = '';
+
+    if (typeof lastUserMessageContent === 'string') {
+      textToModerate = lastUserMessageContent;
+    } else if (Array.isArray(lastUserMessageContent)) {
+      // If it's an array of MultimodalContent, concatenate all text elements into a single string
+      textToModerate = lastUserMessageContent
+        .filter(content => content.type === 'text' && typeof content.text === 'string')
+        .map(content => content.text)
+        .join(' ');
+    }
+
+    // Now textToModerate is guaranteed to be a string
     const moderationPath = this.path(OpenaiPath.ModerationPath);
 
     // Check if text moderation is enabled and required
-    if (textmoderation !== false
-      && options.whitelist !== true
-      // Skip text moderation for the Azure provider, since Azure already has text moderation enabled by default on its service
-      && checkprovider !== ServiceProvider.Azure) {
+    if (textmoderation !== false &&
+      options.whitelist !== true &&
+      checkprovider !== ServiceProvider.Azure &&
+      textToModerate) { // Ensure textToModerate is not empty
       // Call the moderateText method and handle the result
-      const moderationResult = await moderateText(moderationPath, lastUserMessage, OpenaiPath.TextModerationModels.latest);
+      const moderationResult = await moderateText(moderationPath, textToModerate, OpenaiPath.TextModerationModels.latest);
       if (moderationResult) {
         options.onFinish(moderationResult); // Finish early if moderationResult is not null
         return;
       }
     }
 
     const messages = options.messages.map((v) => ({
       role: v.role,
-      content: v.content,
+      content: visionModel ? v.content : getMessageTextContent(v),
     }));
 
     const modelConfig = {
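Concretely, the extraction above flattens a multimodal message to plain text before moderation, skipping image parts. For example (illustrative message):

const lastUserMessageContent: string | MultimodalContent[] = [
  { type: "text", text: "Describe this image" },
  { type: "image_url", image_url: { url: "data:image/jpeg;base64,/9j/4AAQ..." } },
  { type: "text", text: "in one sentence." },
];

// Only the text parts are kept and joined with spaces:
// textToModerate === "Describe this image in one sentence."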
16 changes: 15 additions & 1 deletion app/components/chat-list.tsx
@@ -136,11 +136,25 @@ export function ChatList(props: { narrow?: boolean; search: string }) {
     let foundKeyword = false;
 
     item.messages.forEach((message) => {
+      // Check if content is a string before calling includes
       // console.log(chatListSearch, message.content, message.content.includes(chatListSearch))
-      if (message.content.includes(props.search)) {
+      if (typeof message.content === 'string' && message.content.includes(props.search)) {
         foundKeyword = true;
         return;
       }
+
+      // If content is an array of MultimodalContent, search each text part instead
+      if (Array.isArray(message.content)) {
+        message.content.forEach((multimodalContent) => {
+          if (multimodalContent.type === 'text' &&
+            multimodalContent.text &&
+            multimodalContent.text.includes(props.search)) {
+            foundKeyword = true;
+            return;
+          }
+        });
+      }
     });
 
     return foundKeyword;
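Note that return inside a forEach callback only ends the current iteration, so the loops above always visit every message; the result is still correct because foundKeyword is never reset. An equivalent early-exiting form using some() would be (an optional alternative, not part of this commit):

const foundKeyword = item.messages.some((message) =>
  typeof message.content === "string"
    ? message.content.includes(props.search)
    : Array.isArray(message.content) &&
      message.content.some(
        (part) =>
          part.type === "text" &&
          !!part.text &&
          part.text.includes(props.search),
      ),
);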