diff --git a/action.yml b/action.yml
index 8c96b0ba..90b8f2ee 100644
--- a/action.yml
+++ b/action.yml
@@ -101,8 +101,6 @@ inputs:
       !**/*.pb.go
       !**/*.lock
       !**/*.ttf
-      !**/*.yaml
-      !**/*.yml
       !**/*.cfg
       !**/*.toml
       !**/*.ini
@@ -136,6 +134,8 @@ inputs:
       !**/*.min.js.css
       !**/*.tfstate
       !**/*.tfstate.backup
+      !**/vault.yml
+      !**/vault.yaml
   disable_review:
     required: false
     description: 'Only provide the summary and skip the code review.'
@@ -156,7 +156,7 @@ inputs:
   openai_heavy_model:
     required: false
     description: 'Model to use for complex tasks such as code reviews.'
-    default: 'gpt-4'
+    default: 'gpt-4-turbo-preview'
   openai_model_temperature:
     required: false
     description: 'Temperature for GPT model'
@@ -212,9 +212,6 @@ inputs:
         specific files within 80 words.
       - **Changes**: A markdown table of files and their summaries. Group files
         with similar changes together into a single row to save space.
-      - **Poem**: Below the changes, include a whimsical, short poem written by
-        a rabbit to celebrate the changes. Format the poem as a quote using
-        the ">" symbol and feel free to use emojis where relevant.
       Avoid additional commentary as this summary will be added as a comment on
       the GitHub pull request. Use the titles "Walkthrough" and "Changes" and
       they must be H2.
diff --git a/dist/index.js b/dist/index.js
index a6f49664..6847d334 100644
--- a/dist/index.js
+++ b/dist/index.js
@@ -6525,21 +6525,28 @@ class TokenLimits {
     knowledgeCutOff;
     constructor(model = 'gpt-3.5-turbo') {
         this.knowledgeCutOff = '2021-09-01';
-        if (model === 'gpt-4-32k') {
-            this.maxTokens = 32600;
-            this.responseTokens = 4000;
-        }
-        else if (model === 'gpt-3.5-turbo-16k') {
-            this.maxTokens = 16300;
-            this.responseTokens = 3000;
-        }
-        else if (model === 'gpt-4') {
-            this.maxTokens = 8000;
-            this.responseTokens = 2000;
-        }
-        else {
-            this.maxTokens = 4000;
-            this.responseTokens = 1000;
-        }
+        switch (model) {
+            case 'gpt-4-32k':
+                this.maxTokens = 32600;
+                this.responseTokens = 4000;
+                break;
+            case 'gpt-3.5-turbo-16k':
+                this.maxTokens = 16300;
+                this.responseTokens = 3000;
+                break;
+            case 'gpt-4':
+                this.maxTokens = 8000;
+                this.responseTokens = 2000;
+                break;
+            case 'gpt-4-turbo-preview':
+                this.maxTokens = 128000;
+                this.responseTokens = 4000;
+                this.knowledgeCutOff = '2023-12-01';
+                break;
+            default:
+                this.maxTokens = 4000;
+                this.responseTokens = 1000;
+                break;
+        }
         // provide some margin for the request tokens
         this.requestTokens = this.maxTokens - this.responseTokens - 100;
diff --git a/src/limits.ts b/src/limits.ts
index aca807f6..a174ec55 100644
--- a/src/limits.ts
+++ b/src/limits.ts
@@ -6,19 +6,30 @@ export class TokenLimits {

   constructor(model = 'gpt-3.5-turbo') {
     this.knowledgeCutOff = '2021-09-01'
-    if (model === 'gpt-4-32k') {
-      this.maxTokens = 32600
-      this.responseTokens = 4000
-    } else if (model === 'gpt-3.5-turbo-16k') {
-      this.maxTokens = 16300
-      this.responseTokens = 3000
-    } else if (model === 'gpt-4') {
-      this.maxTokens = 8000
-      this.responseTokens = 2000
-    } else {
-      this.maxTokens = 4000
-      this.responseTokens = 1000
-    }
+    switch (model) {
+      case 'gpt-4-32k':
+        this.maxTokens = 32600
+        this.responseTokens = 4000
+        break
+      case 'gpt-3.5-turbo-16k':
+        this.maxTokens = 16300
+        this.responseTokens = 3000
+        break
+      case 'gpt-4':
+        this.maxTokens = 8000
+        this.responseTokens = 2000
+        break
+      case 'gpt-4-turbo-preview':
+        this.maxTokens = 128000
+        this.responseTokens = 4000
+        this.knowledgeCutOff = '2023-12-01'
+        break
+      default:
+        this.maxTokens = 4000
+        this.responseTokens = 1000
+        break
+    }
+    // provide some margin for the request tokens
     this.requestTokens = this.maxTokens - this.responseTokens - 100
   }
 }
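
For reference, a quick sketch (not part of the patch) of what the reworked TokenLimits switch yields for the newly added model, assuming the class is imported from src/limits.ts:

import {TokenLimits} from './limits'

// The new case gets the 128k context window and the later knowledge cutoff.
const turbo = new TokenLimits('gpt-4-turbo-preview')
console.log(turbo.maxTokens)        // 128000
console.log(turbo.responseTokens)   // 4000
console.log(turbo.requestTokens)    // 128000 - 4000 - 100 = 123900
console.log(turbo.knowledgeCutOff)  // '2023-12-01'

// Unrecognized model names still fall through to the conservative default.
const fallback = new TokenLimits('some-other-model')
console.log(fallback.maxTokens)     // 4000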