Skip to content

Commit

Permalink
💄 style: add o1-preview and o1-mini models to github model provider
Browse files Browse the repository at this point in the history
  • Loading branch information
ymefg committed Sep 25, 2024
1 parent bfb7675 commit caaf206
Show file tree
Hide file tree
Showing 2 changed files with 52 additions and 1 deletion.
20 changes: 20 additions & 0 deletions src/config/modelProviders/github.ts
Original file line number Diff line number Diff line change
Expand Up @@ -139,6 +139,26 @@ const Github: ModelProviderCard = {
tokens: 128_000,
vision: true,
},
{
  description: 'Focused on advanced reasoning and solving complex problems, including math and science tasks. Ideal for applications that require deep contextual understanding and agentic workflows.',
  displayName: 'OpenAI o1-preview',
  enabled: true,
  // o1 models do not support tool/function calling.
  functionCall: false,
  id: 'o1-preview',
  maxOutput: 32_768,
  tokens: 128_000,
  // o1-preview does not accept image input via the API — advertising vision
  // support would route image payloads to a model that rejects them.
  vision: false,
},
{
  description: 'Smaller, faster, and 80% cheaper than o1-preview, performs well at code generation and small context operations.',
  displayName: 'OpenAI o1-mini',
  enabled: true,
  functionCall: false,
  id: 'o1-mini',
  maxOutput: 65_536,
  tokens: 128_000,
  // o1-mini is text-only; it does not accept image input.
  vision: false,
},
{
description:
'Same Phi-3-medium model, but with a larger context size for RAG or few shot prompting.',
Expand Down
33 changes: 32 additions & 1 deletion src/libs/agent-runtime/github/index.ts
Original file line number Diff line number Diff line change
@@ -1,9 +1,40 @@
import { AgentRuntimeErrorType } from '../error';
import { ModelProvider } from '../types';
import { ChatStreamPayload, ModelProvider, OpenAIChatMessage } from '../types';
import { LobeOpenAICompatibleFactory } from '../utils/openaiCompatibleFactory';

// TODO: temporary implementation — refactor into model-card display config later.
export const o1Models = new Set(['o1-preview', 'o1-mini']);

/**
 * Rewrites a chat payload so it satisfies the o1 API constraints:
 * no `system` role, no streaming, and fixed sampling parameters
 * (temperature 1, top_p 1, zero frequency/presence penalties).
 */
export const pruneO1Payload = (payload: ChatStreamPayload) => {
  // o1 models reject the `system` role; downgrade such messages to `user`.
  const messages = payload.messages.map((message: OpenAIChatMessage) => {
    const role = message.role === 'system' ? 'user' : message.role;
    return { ...message, role };
  });

  return {
    ...payload,
    frequency_penalty: 0,
    messages,
    presence_penalty: 0,
    stream: false,
    temperature: 1,
    top_p: 1,
  };
};


// GitHub Models provider runtime, built on the shared OpenAI-compatible factory.
export const LobeGithubAI = LobeOpenAICompatibleFactory({
  // GitHub Models requests are served through this Azure-hosted inference endpoint.
  baseURL: 'https://models.inference.ai.azure.com',
  chatCompletion: {
    handlePayload: (payload) => {
      const { model } = payload;

      // o1 models only accept a fixed parameter set and no streaming, so their
      // payloads are rewritten before dispatch.
      if (o1Models.has(model)) {
        // NOTE(review): `as any` bypasses the factory's payload typing —
        // presumably because `stream: false` conflicts with the expected
        // return type; confirm and replace with a precise type if possible.
        return pruneO1Payload(payload) as any;
      }

      // All other models default to streaming unless the caller opted out.
      return { ...payload, stream: payload.stream ?? true };
    },
  },
  debug: {
    // Opt-in request logging, toggled via environment variable.
    chatCompletion: () => process.env.DEBUG_GITHUB_CHAT_COMPLETION === '1',
  },
Expand Down

0 comments on commit caaf206

Please sign in to comment.