Commit fe2df7d

committed Jul 12, 2024
feat: examples v2.0.10
1 parent 20d35f1 commit fe2df7d

6 files changed: +171 −22 lines changed


examples/basic-usage/chat.js

+87
@@ -0,0 +1,87 @@
/**
 * @file examples/basic-usage/chat.js
 * @description This example demonstrates a chat using an OpenAI compatible structure.
 *
 * To run this example, you first need to install the required modules by executing:
 *
 * npm install dotenv
 */

const { LLMInterface } = require('../../src/index.js');
const {
  prettyHeader,
  prettyText,
  prettyResult,
  GREEN,
  RESET,
} = require('../../src/utils/utils.js');
require('dotenv').config({ path: '../../.env' });

// Setup your key and interface
const interfaceName = 'groq';
const apiKey = process.env.GROQ_API_KEY;

// Example description
const description = `This example demonstrates a chat using an OpenAI compatible structure.

To run this example, you first need to install the required modules by executing:

npm install dotenv`;

/**
 * Main exampleUsage() function.
 */
async function exampleUsage() {
  try {
    console.time('Timer');
    // OpenAI chat.completion structure
    const openaiCompatibleStructure = {
      "model": "gemma-7b-it",
      "messages": [
        { "role": "system", "content": "You are a helpful assistant." },
        { "role": "user", "content": "Say hello with a polite greeting!" },
        { "role": "system", "content": "Hello there! It's an absolute pleasure to make your acquaintance. How may I have the honor of assisting you today?" },
        { "role": "user", "content": "I need help understanding low latency LLMs!" }
      ],
      "max_tokens": 100
    };

    // Concatenate messages into a single string
    const concatenatedMessages = openaiCompatibleStructure.messages
      .map((message) => `${message.role}: ${message.content}`)
      .join('\n');

    prettyHeader(
      'Chat Example',
      description,
      false,
      interfaceName,
    );

    prettyText(`\n\n${GREEN}Prompt (OpenAI Compatible Structure):${RESET}\n\n`);
    console.log(openaiCompatibleStructure);
    console.log();

    LLMInterface.setApiKey(interfaceName, apiKey);

    const response = await LLMInterface.sendMessage(interfaceName, openaiCompatibleStructure);

    /*
    or for the OpenAI API fans:

    const response = await LLMInterface.chat.completions.create(
      interfaceName,
      openaiCompatibleStructure
    );
    */

    prettyResult(response.results);
    console.timeEnd('Timer');
    console.log();
  } catch (error) {
    console.error('Error processing openaiCompatibleStructure:', error);
  }
}

exampleUsage();
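
Stripped of the pretty-print helpers, the heart of chat.js is a single LLMInterface.sendMessage() call that takes the OpenAI-compatible object directly. A minimal sketch, assuming only the names the example above already uses (setApiKey, sendMessage, and the results field on the response):

// Minimal sketch of the chat.js flow above; paths and key setup mirror the example.
const { LLMInterface } = require('../../src/index.js');
require('dotenv').config({ path: '../../.env' });

async function minimalChat() {
  LLMInterface.setApiKey('groq', process.env.GROQ_API_KEY);

  // The same OpenAI-compatible request body used in chat.js, shortened to two messages
  const request = {
    model: 'gemma-7b-it',
    messages: [
      { role: 'system', content: 'You are a helpful assistant.' },
      { role: 'user', content: 'Say hello with a polite greeting!' },
    ],
    max_tokens: 100,
  };

  const response = await LLMInterface.sendMessage('groq', request);
  console.log(response.results); // results carries the model output, as in chat.js
}

minimalChat().catch(console.error);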

examples/basic-usage/prompt.js

+52
@@ -0,0 +1,52 @@
/**
 * @file examples/basic-usage/prompt.js
 * @description This example demonstrates submitting a string prompt.
 *
 * To run this example, you first need to install the required modules by executing:
 *
 * npm install dotenv
 */

const { LLMInterface } = require('../../src/index.js');
const { prettyHeader, prettyResult } = require('../../src/utils/utils.js');
const { simplePrompt, options } = require('../../src/utils/defaults.js');
require('dotenv').config({ path: '../../.env' });

// Setup your key and interface
const interfaceName = 'groq';
const apiKey = process.env.GROQ_API_KEY;

// Example description
const description = `This example demonstrates submitting a string prompt.

To run this example, you first need to install the required modules by executing:

npm install dotenv`;

/**
 * Main exampleUsage() function.
 */
async function exampleUsage() {
  try {
    console.time('Timer');
    prettyHeader(
      'Prompt Example',
      description,
      simplePrompt,
      interfaceName,
    );

    LLMInterface.setApiKey(interfaceName, apiKey);

    const response = await LLMInterface.sendMessage(interfaceName, simplePrompt, options);

    prettyResult(response.results);
    console.timeEnd('Timer');
    console.log();
  } catch (error) {
    console.error('Error processing prompt:', error);
  }
}

exampleUsage();
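
prompt.js sends a plain string plus an options object instead of a full chat structure. The shape of options from defaults.js is not shown in this diff, so the inline object below is an assumption modeled on the streaming example further down, which passes max_tokens the same way:

// Minimal string-prompt sketch; the inline options object is assumed, not taken from defaults.js.
const { LLMInterface } = require('../../src/index.js');
require('dotenv').config({ path: '../../.env' });

async function minimalPrompt() {
  LLMInterface.setApiKey('groq', process.env.GROQ_API_KEY);

  const response = await LLMInterface.sendMessage(
    'groq',
    'Explain low latency LLMs in one sentence.',
    { max_tokens: 100 }, // assumed option shape, mirroring { max_tokens: 25 } in streaming-mode.js
  );

  console.log(response.results);
}

minimalPrompt().catch(console.error);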

examples/misc/streaming-mode.js → examples/basic-usage/streaming-mode.js

+24 −13
@@ -1,5 +1,5 @@
 /**
- * @file examples/misc/streaming-mode.js
+ * @file examples/basic-usage/streaming-mode.js
  * @description This example demonstrates the new beta streaming functionality with the "groq" interface.
  *
  * To run this example, you first need to install the required modules by executing:
@@ -10,9 +10,14 @@
  */

 const { Readable } = require('stream');
-const { simplePrompt, options } = require('../../src/utils/defaults.js');
+const { simplePrompt } = require('../../src/utils/defaults.js');
 const { LLMInterface } = require('../../src/index.js');
-const { prettyHeader } = require('../../src/utils/utils.js');
+const { prettyHeader,
+  prettyText,
+  YELLOW,
+  GREEN,
+  RESET,
+} = require('../../src/utils/utils.js');
 require('dotenv').config({ path: '../../.env' });

 // Setup your key and interface
@@ -80,18 +85,22 @@ async function processStream(stream) {
  * Main exampleUsage() function.
  */
 async function exampleUsage() {
-  console.log('Streaming Mode (Groq):');
-  console.log();
-
-  LLMInterface.setApiKey(interfaceName, apiKey);
-
   try {
-    console.log('Process Stream');
+    console.time('Timer');
+    prettyHeader(
+      'Streaming Mode',
+      description,
+      simplePrompt,
+      interfaceName,
+    );
+    console.log();
+    prettyText(`\n${GREEN}Response:${RESET}\n`);
     console.log();
+    LLMInterface.setApiKey(interfaceName, apiKey);

     const stream = await LLMInterface.sendMessage(interfaceName, simplePrompt, {
       stream: true,
-      ...options,
+      max_tokens: 25
     });

     /*
@@ -105,10 +114,12 @@ async function exampleUsage() {

     */

-    const result = await processStream(stream.data);
+    await processStream(stream.data);
+
+    console.log();
+    console.timeEnd('Timer');
     console.log();
-    console.log('Concatenated Content');
-    console.log(result);
+
   } catch (error) {
     console.error('Error processing stream:', error);
   }
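
The hunks above call processStream(stream.data), but the helper itself sits earlier in streaming-mode.js and is outside this diff. A rough sketch of what such a helper can look like, assuming stream.data is a Node.js Readable of SSE-style "data:" chunks (both details are assumptions, not the file's actual implementation):

// Hypothetical processStream sketch; the real helper in streaming-mode.js is not shown here.
async function processStream(stream) {
  let content = '';
  for await (const chunk of stream) {
    for (const line of chunk.toString().split('\n')) {
      if (!line.startsWith('data: ') || line.trim() === 'data: [DONE]') continue;
      try {
        const payload = JSON.parse(line.slice('data: '.length));
        const delta = payload.choices?.[0]?.delta?.content;
        if (delta) {
          process.stdout.write(delta); // print tokens as they arrive
          content += delta;
        }
      } catch {
        // ignore JSON fragments split across chunk boundaries (simplification)
      }
    }
  }
  return content;
}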

examples/caching/memory-response-cache.js → examples/caching/memory-cache.js

+2 −2
@@ -1,5 +1,5 @@
 /**
- * @file examples/caching/memory-response-cache.js
+ * @file examples/caching/memory-cache.js
  * @description This example demonstrates the usage of the memory cache for caching API requests.
  *
  * This example shows LLMInterface configured with a memory cache. Subsequent calls to LLMInterface.sendMessage()
@@ -38,7 +38,7 @@ Note: This script will run faster on subsequent executions within the same sessi
  */
 async function exampleUsage() {
   prettyHeader(
-    'Memory Response Cache Example',
+    'Memory Cache Example',
     description,
     simplePrompt,
     interfaceName,
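
This change only renames the file and its printed title; the cache wiring itself is unchanged and not shown here. As a concept-only illustration of what a memory response cache does (not LLMInterface's own implementation), repeated identical requests can be answered from an in-process Map instead of hitting the API again:

// Concept illustration only, not the library's cache code.
const responseCache = new Map();

async function cachedSendMessage(llm, interfaceName, prompt, options = {}) {
  const key = JSON.stringify([interfaceName, prompt, options]);
  if (responseCache.has(key)) return responseCache.get(key); // cache hit: no API call
  const response = await llm.sendMessage(interfaceName, prompt, options);
  responseCache.set(key, response); // cache miss: store for the rest of the session
  return response;
}

// e.g. await cachedSendMessage(LLMInterface, 'groq', simplePrompt);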

examples/embeddings/embeddings-failover-custom.js → examples/embeddings/embeddings-custom-failover.js

+1 −1
@@ -1,5 +1,5 @@
 /**
- * @file examples/embeddings/embeddings-failover-custom.js
+ * @file examples/embeddings/embeddings-custom-failover.js
  * @description This example demonstrates the usage of LLMInterface.embeddings() with a custom failover mechanism.
  * To use a custom failover, ensure your selected service supports embeddings and provide LLMInterface with the associated API key.
  *
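
This rename likewise leaves the failover logic itself outside the diff. A loose sketch of the custom-failover idea described in the header comment, with the LLMInterface.embeddings() signature and the provider order treated as assumptions (API keys would still be set via setApiKey, as in the other examples):

// Loose failover sketch; the embeddings() signature and provider names are assumptions,
// not the library's documented API.
const { LLMInterface } = require('../../src/index.js');
require('dotenv').config({ path: '../../.env' });

async function embeddingsWithFailover(prompt) {
  const providers = ['huggingface', 'ai21']; // hypothetical order; both appear in the langchain examples
  for (const interfaceName of providers) {
    try {
      return await LLMInterface.embeddings(interfaceName, prompt); // assumed signature
    } catch (error) {
      console.error(`Embeddings via ${interfaceName} failed, trying the next provider:`, error.message);
    }
  }
  throw new Error('All embeddings providers failed');
}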

examples/langchain/rag.js

+5 −6
@@ -1,13 +1,13 @@
 /**
  * @file examples/langchain/rag.js
- * @description This example demonstrates Retrieval-Augmented Generation (RAG) with custom models built using LLMInterface, which are compatible with LangChain.
+ * @description This example demonstrates Retrieval-Augmented Generation (RAG) with custom models built using LLMInterface, which are compatible with LangChain.js.
  *
  * To run this example, you need to install the required modules by executing:
  * "npm install langchain dotenv".
  *
  * This example showcases how to retrieve relevant documents from a local directory, generate embeddings using a custom model built with LLMInterface, identify the most relevant context for answering a question, and construct a prompt for a language model to generate a response.
  *
- * The workflow employs cosine similarity to determine document relevance and utilizes LangChain to format and process the final prompt. After completing the RAG process, a final direct query is sent to the provider, and the control answer is displayed for comparison.
+ * The workflow employs cosine similarity to determine document relevance and utilizes LangChain.js to format and process the final prompt. After completing the RAG process, a final direct query is sent to the provider, and the control answer is displayed for comparison.
  */

 const fs = require('fs');
@@ -16,7 +16,6 @@ const {
   prettyHeader,
   prettyResult,
   prettyText,
-  YELLOW,
   GREEN,
   RESET,
 } = require('../../src/utils/utils.js');
@@ -27,9 +26,9 @@ const HuggingFaceModel = require('./models/huggingfaceModel');
 const AI21Model = require('./models/ai21Model');

 // Example description
-const description = `This example demonstrates the use of Retrieval-Augmented Generation (RAG) with custom models built using LLMInterface, which are compatible with LangChain. The process involves retrieving relevant documents from a local directory, generating embeddings, identifying the most pertinent context for answering a question, and constructing a prompt for a language model to generate a response.
+const description = `This example demonstrates the use of Retrieval-Augmented Generation (RAG) with custom models built using LLMInterface, which are compatible with LangChain.js. The process involves retrieving relevant documents from a local directory, generating embeddings, identifying the most pertinent context for answering a question, and constructing a prompt for a language model to generate a response.

-The workflow employs cosine similarity to determine the relevance of documents and utilizes LangChain to format and process the final prompt. After completing the RAG process, a final direct query is sent to the provider, and the control answer is displayed for comparison.`;
+The workflow employs cosine similarity to determine the relevance of documents and utilizes LangChain.js to format and process the final prompt. After completing the RAG process, a final direct query is sent to the provider, and the control answer is displayed for comparison.`;

 require('dotenv').config({ path: '../../.env' });

@@ -112,7 +111,7 @@ async function exampleUsage(provider) {

   console.time('Timer');
   prettyText(
-    `\n${YELLOW}Use Langchain to create the PromptTemplate and invoke LLMChain${RESET}\n`,
+    `\n${YELLOW}Use Langchain.js to create the PromptTemplate and invoke LLMChain${RESET}\n`,
   );

   const promptTemplate = new PromptTemplate({
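
The description above leans on cosine similarity to rank document relevance; that helper sits outside these hunks. A standalone sketch of the scoring step (not necessarily the exact helper rag.js uses):

// Cosine similarity between two equal-length embedding vectors.
function cosineSimilarity(a, b) {
  let dot = 0;
  let normA = 0;
  let normB = 0;
  for (let i = 0; i < a.length; i++) {
    dot += a[i] * b[i];
    normA += a[i] * a[i];
    normB += b[i] * b[i];
  }
  return dot / (Math.sqrt(normA) * Math.sqrt(normB));
}

// Rank documents by similarity between the question embedding and each document embedding.
function rankByRelevance(questionEmbedding, docEmbeddings) {
  return docEmbeddings
    .map((embedding, index) => ({ index, score: cosineSimilarity(questionEmbedding, embedding) }))
    .sort((a, b) => b.score - a.score);
}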
