@@ -160,6 +160,7 @@ export class ChatGPTApi implements LLMApi {
     let requestPayload: RequestPayload | DalleRequestPayload;

     const isDalle3 = _isDalle3(options.config.model);
+    const isO1 = options.config.model.startsWith("o1");
     if (isDalle3) {
       const prompt = getMessageTextContent(
         options.messages.slice(-1)?.pop() as any,
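The new isO1 flag mirrors the existing _isDalle3 check: a plain prefix test on the configured model name, so it matches o1-preview, o1-mini, and any future o1-* id. A minimal standalone sketch of the same idea (isO1Model is a hypothetical name, not part of this change):

const isO1Model = (model: string): boolean => model.startsWith("o1");

isO1Model("o1-preview"); // true
isO1Model("o1-mini");    // true
isO1Model("gpt-4o");     // false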
@@ -181,30 +182,32 @@ export class ChatGPTApi implements LLMApi {
         const content = visionModel
           ? await preProcessImageContent(v.content)
           : getMessageTextContent(v);
-        messages.push({ role: v.role, content });
+        if (!(isO1 && v.role === "system"))
+          messages.push({ role: v.role, content });
       }

+      // O1 does not yet support images, tools (plugins in ChatGPTNextWeb), system messages, stream, logprobs, temperature, top_p, n, presence_penalty, or frequency_penalty.
       requestPayload = {
         messages,
-        stream: options.config.stream,
+        stream: !isO1 ? options.config.stream : false,
         model: modelConfig.model,
-        temperature: modelConfig.temperature,
-        presence_penalty: modelConfig.presence_penalty,
-        frequency_penalty: modelConfig.frequency_penalty,
-        top_p: modelConfig.top_p,
+        temperature: !isO1 ? modelConfig.temperature : 1,
+        presence_penalty: !isO1 ? modelConfig.presence_penalty : 0,
+        frequency_penalty: !isO1 ? modelConfig.frequency_penalty : 0,
+        top_p: !isO1 ? modelConfig.top_p : 1,
         // max_tokens: Math.max(modelConfig.max_tokens, 1024),
         // Please do not ask me why not send max_tokens, no reason, this param is just shit, I dont want to explain anymore.
       };

       // add max_tokens to vision model
-      if (visionModel && modelConfig.model.includes("preview")) {
+      if (visionModel) {
         requestPayload["max_tokens"] = Math.max(modelConfig.max_tokens, 4000);
       }
     }

     console.log("[Request] openai payload: ", requestPayload);

-    const shouldStream = !isDalle3 && !!options.config.stream;
+    const shouldStream = !isDalle3 && !!options.config.stream && !isO1;
     const controller = new AbortController();
     options.onController?.(controller);

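Taken together, this hunk drops system-role messages for o1 and pins every sampling parameter o1 rejects to its API default, with streaming forced off. The same normalization can be written as a pure helper; this is only a sketch under the assumptions of this diff (normalizeForO1 and the SamplingConfig shape are hypothetical, not part of the change):

interface SamplingConfig {
  stream?: boolean;
  temperature: number;
  presence_penalty: number;
  frequency_penalty: number;
  top_p: number;
}

// For o1 models, force non-streaming and reset the sampling knobs to the
// only values the endpoint currently accepts (temperature 1, penalties 0, top_p 1).
function normalizeForO1(cfg: SamplingConfig, isO1: boolean): SamplingConfig {
  if (!isO1) return cfg;
  return {
    ...cfg,
    stream: false,
    temperature: 1,
    presence_penalty: 0,
    frequency_penalty: 0,
    top_p: 1,
  };
}

Keeping the ternaries inline in requestPayload, as the diff does, avoids a second object spread, at the cost of repeating the isO1 special-casing across several lines.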
@@ -244,7 +247,7 @@ export class ChatGPTApi implements LLMApi {
       const [tools, funcs] = usePluginStore
         .getState()
         .getAsTools(
-          useChatStore.getState().currentSession().mask?.plugin as string[],
+          useChatStore.getState().currentSession().mask?.plugin || [],
         );
       // console.log("getAsTools", tools, funcs);
       stream(
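The old `as string[]` cast only silenced the compiler; when mask?.plugin was undefined, getAsTools still received undefined at runtime. The `|| []` fallback guarantees an array. A small sketch of the difference (the getAsTools stub below is assumed, not the store's real implementation):

// Stub standing in for usePluginStore's getAsTools.
const getAsTools = (pluginIds: string[]) => pluginIds.length;

const plugin: string[] | undefined = undefined;

// Before: `plugin as string[]` type-checks, but undefined still reaches the
// callee, so anything touching pluginIds.length would throw at runtime.
// After: undefined collapses to a safe empty list.
getAsTools(plugin || []);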
@@ -313,7 +316,7 @@ export class ChatGPTApi implements LLMApi {
       // make a fetch request
       const requestTimeoutId = setTimeout(
         () => controller.abort(),
-        isDalle3 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
+        isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow.
       );

       const res = await fetch(chatPath, chatPayload);
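Because o1 answers arrive in one non-streamed response after a long reasoning phase, the abort timeout is doubled for it, reusing the slow path already in place for DALL·E 3's b64_json payloads. A standalone sketch (the REQUEST_TIMEOUT_MS value below is assumed for illustration; the real constant lives elsewhere in the repo):

const REQUEST_TIMEOUT_MS = 60_000; // assumed value, for illustration only

// Slow paths (base64 image payloads, long o1 turns) get twice the budget.
const requestTimeout = (isDalle3: boolean, isO1: boolean): number =>
  isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS;

const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), requestTimeout(false, true));
clearTimeout(timeoutId); // cleared once the fetch settles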
@@ -407,7 +410,9 @@ export class ChatGPTApi implements LLMApi {
     });

     const resJson = (await res.json()) as OpenAIListModelResponse;
-    const chatModels = resJson.data?.filter((m) => m.id.startsWith("gpt-"));
+    const chatModels = resJson.data?.filter(
+      (m) => m.id.startsWith("gpt-") || m.id.startsWith("chatgpt-"),
+    );
     console.log("[Models]", chatModels);

     if (!chatModels) {
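The widened filter keeps both gpt-* and chatgpt-* ids (e.g. chatgpt-4o-latest) from the models listing instead of only gpt-*. The predicate in isolation (sample ids are illustrative):

const isChatModel = (id: string): boolean =>
  id.startsWith("gpt-") || id.startsWith("chatgpt-");

["gpt-4o", "chatgpt-4o-latest", "dall-e-3"].filter(isChatModel);
// => ["gpt-4o", "chatgpt-4o-latest"]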