Skip to content

Commit 9f59f68

Browse files
committed Jul 17, 2024
feat: improved error handling and retry logic
- Added a `.total_time` value showing total generation time (including retries).
- Added a `.retries` value showing the number of retries required.
1 parent 015a9ce commit 9f59f68

File tree

4 files changed

+27
-28
lines changed

4 files changed

+27
-28
lines changed
 

‎examples/interface-options/include-original-response.js

+3-2
Original file line numberDiff line numberDiff line change
@@ -29,7 +29,7 @@ To run this example, you first need to install the required modules by executing
2929
*/
3030
async function exampleUsage() {
3131
prettyHeader(
32-
'Auto Retry Failed Requests Example',
32+
'Include Original Response',
3333
description,
3434
simplePrompt,
3535
interfaceName,
@@ -50,7 +50,8 @@ async function exampleUsage() {
5050
},
5151
);
5252

53-
prettyResult(response.results);
53+
//prettyResult(response.results);
54+
console.log(response);
5455
console.log();
5556
console.timeEnd('Timer');
5657
console.log();

‎src/utils/embeddings.js

+12-14
Original file line numberDiff line numberDiff line change
@@ -140,13 +140,15 @@ async function LLMInterfaceEmbeddings(
140140
);
141141
};
142142

143-
try {
144-
const response = await retryWithBackoff(
145-
embeddingsWithRetries,
146-
interfaceOptions,
147-
);
143+
let response = {};
148144

149-
if (LLMInterface && LLMInterface.cacheManagerInstance && response) {
145+
try {
146+
response = await retryWithBackoff(embeddingsWithRetries, interfaceOptions);
147+
} catch (error) {
148+
throw error;
149+
}
150+
if (LLMInterface && LLMInterface.cacheManagerInstance && response?.results) {
151+
try {
150152
const { cacheManagerInstance } = LLMInterface;
151153

152154
if (cacheManagerInstance.cacheType === 'memory-cache') {
@@ -158,16 +160,12 @@ async function LLMInterfaceEmbeddings(
158160
cacheTimeoutSeconds,
159161
);
160162
}
163+
} catch (error) {
164+
throw error;
161165
}
162-
163-
return response;
164-
} catch (error) {
165-
throw new EmbeddingsError(
166-
`Failed to generate embeddings using LLM ${interfaceName}:`,
167-
error.message,
168-
error.stack,
169-
);
170166
}
167+
168+
return response;
171169
}
172170

173171
/**

‎src/utils/message.js

+11-12
Original file line numberDiff line numberDiff line change
@@ -129,13 +129,15 @@ async function LLMInterfaceSendMessage(
129129
return await llmInstance.sendMessage(message, options, interfaceOptions);
130130
};
131131

132+
let response = {};
132133
try {
133-
const response = await retryWithBackoff(
134-
sendMessageWithRetries,
135-
interfaceOptions,
136-
);
134+
response = await retryWithBackoff(sendMessageWithRetries, interfaceOptions);
135+
} catch (error) {
136+
throw error;
137+
}
137138

138-
if (LLMInterface && LLMInterface.cacheManagerInstance && response) {
139+
if (LLMInterface && LLMInterface.cacheManagerInstance && response?.results) {
140+
try {
139141
const { cacheManagerInstance } = LLMInterface;
140142

141143
if (cacheManagerInstance.cacheType === 'memory-cache') {
@@ -147,15 +149,12 @@ async function LLMInterfaceSendMessage(
147149
cacheTimeoutSeconds,
148150
);
149151
}
152+
} catch (error) {
153+
throw error;
150154
}
151-
152-
return response;
153-
} catch (error) {
154-
throw new SendMessageError(
155-
`Failed to send message using LLM interfaceName ${interfaceName}: ${error.message}`,
156-
error.stack,
157-
);
158155
}
156+
157+
return response;
159158
}
160159

161160
/**

‎src/utils/retryWithBackoff.js

+1
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,7 @@ async function retryWithBackoff(fn, options, errorType) {
2727
const end = hrtime(start);
2828
const milliseconds = end[0] * 1e3 + end[1] / 1e6;
2929
response.total_time = milliseconds.toFixed(5);
30+
response.retries = currentRetry;
3031
return response;
3132
}
3233
} catch (error) {

0 commit comments

Comments
 (0)
Please sign in to comment.