Skip to content

Commit 2da502d

Browse files
Commit message: Merge remote-tracking branch 'up/website' into website
Conflicts resolved in: app/store/chat.ts
2 parents: 370ce3e + 20ae4f5 — commit 2da502d

25 files changed: +380 lines added, -54 lines removed

app/api/openai.ts

+3-1
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,9 @@ function getModels(remoteModelRes: OpenAIListModelResponse) {
1313

1414
if (config.disableGPT4) {
1515
remoteModelRes.data = remoteModelRes.data.filter(
16-
(m) => !m.id.startsWith("gpt-4") || m.id.startsWith("gpt-4o-mini"),
16+
(m) =>
17+
!(m.id.startsWith("gpt-4") || m.id.startsWith("chatgpt-4o")) ||
18+
m.id.startsWith("gpt-4o-mini"),
1719
);
1820
}
1921

app/client/platforms/anthropic.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -203,7 +203,7 @@ export class ClaudeApi implements LLMApi {
203203
const [tools, funcs] = usePluginStore
204204
.getState()
205205
.getAsTools(
206-
useChatStore.getState().currentSession().mask?.plugin as string[],
206+
useChatStore.getState().currentSession().mask?.plugin || [],
207207
);
208208
return stream(
209209
path,

app/client/platforms/moonshot.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -125,7 +125,7 @@ export class MoonshotApi implements LLMApi {
125125
const [tools, funcs] = usePluginStore
126126
.getState()
127127
.getAsTools(
128-
useChatStore.getState().currentSession().mask?.plugin as string[],
128+
useChatStore.getState().currentSession().mask?.plugin || [],
129129
);
130130
return stream(
131131
chatPath,

app/client/platforms/openai.ts

+16-11
Original file line numberDiff line numberDiff line change
@@ -160,6 +160,7 @@ export class ChatGPTApi implements LLMApi {
160160
let requestPayload: RequestPayload | DalleRequestPayload;
161161

162162
const isDalle3 = _isDalle3(options.config.model);
163+
const isO1 = options.config.model.startsWith("o1");
163164
if (isDalle3) {
164165
const prompt = getMessageTextContent(
165166
options.messages.slice(-1)?.pop() as any,
@@ -181,30 +182,32 @@ export class ChatGPTApi implements LLMApi {
181182
const content = visionModel
182183
? await preProcessImageContent(v.content)
183184
: getMessageTextContent(v);
184-
messages.push({ role: v.role, content });
185+
if (!(isO1 && v.role === "system"))
186+
messages.push({ role: v.role, content });
185187
}
186188

189+
// O1 not support image, tools (plugin in ChatGPTNextWeb) and system, stream, logprobs, temperature, top_p, n, presence_penalty, frequency_penalty yet.
187190
requestPayload = {
188191
messages,
189-
stream: options.config.stream,
192+
stream: !isO1 ? options.config.stream : false,
190193
model: modelConfig.model,
191-
temperature: modelConfig.temperature,
192-
presence_penalty: modelConfig.presence_penalty,
193-
frequency_penalty: modelConfig.frequency_penalty,
194-
top_p: modelConfig.top_p,
194+
temperature: !isO1 ? modelConfig.temperature : 1,
195+
presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
196+
frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0,
197+
top_p: !isO1 ? modelConfig.top_p : 1,
195198
// max_tokens: Math.max(modelConfig.max_tokens, 1024),
196199
// Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
197200
};
198201

199202
// add max_tokens to vision model
200-
if (visionModel && modelConfig.model.includes("preview")) {
203+
if (visionModel) {
201204
requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
202205
}
203206
}
204207

205208
console.log("[Request] openai payload: ", requestPayload);
206209

207-
const shouldStream = !isDalle3 && !!options.config.stream;
210+
const shouldStream = !isDalle3 && !!options.config.stream && !isO1;
208211
const controller = new AbortController();
209212
options.onController?.(controller);
210213

@@ -244,7 +247,7 @@ export class ChatGPTApi implements LLMApi {
244247
const [tools, funcs] = usePluginStore
245248
.getState()
246249
.getAsTools(
247-
useChatStore.getState().currentSession().mask?.plugin as string[],
250+
useChatStore.getState().currentSession().mask?.plugin || [],
248251
);
249252
// console.log("getAsTools", tools, funcs);
250253
stream(
@@ -313,7 +316,7 @@ export class ChatGPTApi implements LLMApi {
313316
// make a fetch request
314317
const requestTimeoutId = setTimeout(
315318
() => controller.abort(),
316-
isDalle3 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
319+
isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
317320
);
318321

319322
const res = await fetch(chatPath, chatPayload);
@@ -407,7 +410,9 @@ export class ChatGPTApi implements LLMApi {
407410
});
408411

409412
const resJson = (await res.json()) as OpenAIListModelResponse;
410-
const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-"));
413+
const chatModels = resJson.data?.filter(
414+
(m) => m.id.startsWith("gpt-") || m.id.startsWith("chatgpt-"),
415+
);
411416
console.log("[Models]", chatModels);
412417

413418
if (!chatModels) {

app/components/artifacts.tsx

+1-1
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ export const HTMLPreview = forwardRef<HTMLPreviewHander, HTMLPreviewProps>(
8080
}, [props.autoHeight, props.height, iframeHeight]);
8181

8282
const srcDoc = useMemo(() => {
83-
const script = `<script>new ResizeObserver((entries) => parent.postMessage({id: '${frameId}', height: entries[0].target.clientHeight}, '*')).observe(document.body)</script>`;
83+
const script = `<script>window.addEventListener("DOMContentLoaded", () => new ResizeObserver((entries) => parent.postMessage({id: '${frameId}', height: entries[0].target.clientHeight}, '*')).observe(document.body))</script>`;
8484
if (props.code.includes("<!DOCTYPE html>")) {
8585
props.code.replace("<!DOCTYPE html>", "<!DOCTYPE html>" + script);
8686
}

app/components/chat.module.scss

+48
Original file line numberDiff line numberDiff line change
@@ -646,3 +646,51 @@
646646
bottom: 30px;
647647
}
648648
}
649+
650+
.shortcut-key-container {
651+
padding: 10px;
652+
overflow-y: auto;
653+
display: flex;
654+
flex-direction: column;
655+
}
656+
657+
.shortcut-key-grid {
658+
display: grid;
659+
grid-template-columns: repeat(auto-fit, minmax(350px, 1fr));
660+
gap: 16px;
661+
}
662+
663+
.shortcut-key-item {
664+
display: flex;
665+
justify-content: space-between;
666+
align-items: center;
667+
overflow: hidden;
668+
padding: 10px;
669+
background-color: var(--white);
670+
}
671+
672+
.shortcut-key-title {
673+
font-size: 14px;
674+
color: var(--black);
675+
}
676+
677+
.shortcut-key-keys {
678+
display: flex;
679+
gap: 8px;
680+
}
681+
682+
.shortcut-key {
683+
display: flex;
684+
align-items: center;
685+
justify-content: center;
686+
border: var(--border-in-light);
687+
border-radius: 8px;
688+
padding: 4px;
689+
background-color: var(--gray);
690+
min-width: 32px;
691+
}
692+
693+
.shortcut-key span {
694+
font-size: 12px;
695+
color: var(--black);
696+
}

Comments (0)