Commit dd0e768

[TEST] Add test case for lxm service internal API
- Added positive and negative test cases for the lxm service internal API

Signed-off-by: hyunil park <[email protected]>
1 parent f0c6e84 commit dd0e768

2 files changed: 328 additions and 0 deletions


tests/capi/meson.build

Lines changed: 16 additions & 0 deletions
@@ -76,6 +76,22 @@ if get_option('enable-ml-service')
     test('unittest_capi_service_training_offloading', unittest_capi_service_training_offloading, env: testenv, timeout: 100)
   endif
 endif
+
+# LXM Service Tests
+# These tests require both ml-service and llamacpp to be enabled.
+llamacpp_dep = dependency('llama', required: false)
+if llamacpp_dep.found()
+  # Note: The source file itself is also conditionally compiled with ENABLE_LLAMACPP.
+  unittest_capi_lxm_service = executable('unittest_capi_lxm_service',
+    'unittest_capi_lxm_service.cc',
+    dependencies: service_unittest_deps,
+    install: get_option('install-test'),
+    install_dir: unittest_install_dir
+  )
+  test('unittest_capi_lxm_service', unittest_capi_lxm_service, env: testenv, timeout: 120) # Increased timeout for LLM response
+else
+  message('LXM Service tests will be skipped because llama dependency was not found.')
+endif
 endif

 if nnfw_dep.found()
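
One note on the gating above: the Meson rule only decides whether the test binary is built and registered, based on whether the llama dependency is found at configure time, while the test source separately guards its whole body with ENABLE_LLAMACPP. This commit does not show where that macro gets defined; the snippet below is only an illustration of how such a define is commonly propagated in Meson, not this project's actual build logic.

# Illustration only (not part of this commit): define ENABLE_LLAMACPP
# project-wide when the optional llama dependency is available.
llamacpp_dep = dependency('llama', required: false)
if llamacpp_dep.found()
  add_project_arguments('-DENABLE_LLAMACPP=1', language: ['c', 'cpp'])
endif
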
tests/capi/unittest_capi_lxm_service.cc

Lines changed: 312 additions & 0 deletions
@@ -0,0 +1,312 @@
/* SPDX-License-Identifier: Apache-2.0 */
/**
 * @file unittest_capi_lxm_service.cc
 * @date 26 JULY 2025
 * @brief Unit test for ml-lxm-service.
 * @see https://github.com/nnstreamer/api
 * @author Hyunil Park <[email protected]>
 * @bug No known bugs
 */

#include <gtest/gtest.h>
#include <glib.h>
#include <ml-api-common.h>
#include <ml-api-service-private.h>
#include <ml-api-service.h>
#include <string.h>
#include "ml-lxm-service-internal.h"
#include "unittest_util.h"

#if defined(ENABLE_LLAMACPP)

/**
 * @brief Internal function to get the model file path.
 */
static gchar *
_get_model_path (const gchar *model_name)
{
  const gchar *root_path = g_getenv ("MLAPI_SOURCE_ROOT_PATH");

  /* Supposed to run test in build directory. */
  if (root_path == NULL)
    root_path = "..";

  gchar *model_file = g_build_filename (
      root_path, "tests", "test_models", "models", model_name, NULL);

  return model_file;
}

/**
 * @brief Macro to skip testcase if required files are not ready.
 */
#define skip_lxm_tc(tc_name)                                                        \
  do {                                                                              \
    g_autofree gchar *model_file = _get_model_path ("llama-2-7b-chat.Q2_K.gguf");   \
    if (!g_file_test (model_file, G_FILE_TEST_EXISTS)) {                            \
      g_autofree gchar *msg = g_strdup_printf (                                     \
          "Skipping '%s' due to missing model file. "                               \
          "Please download model file from https://huggingface.co/TheBloke/Llama-2-7B-Chat-GGUF.", \
          tc_name);                                                                 \
      GTEST_SKIP () << msg;                                                         \
    }                                                                               \
  } while (0)

/**
 * @brief Test data structure to pass to the callback.
 */
typedef struct {
  int token_count;
  GString *received_tokens;
} lxm_test_data_s;

/**
 * @brief Callback function for LXM service token streaming.
 */
static void
_lxm_token_cb (ml_service_event_e event, ml_information_h event_data, void *user_data)
{
  lxm_test_data_s *tdata = (lxm_test_data_s *) user_data;
  ml_tensors_data_h data = NULL;
  void *_raw = NULL;
  size_t _size = 0;
  int status;

  switch (event) {
    case ML_SERVICE_EVENT_NEW_DATA:
      ASSERT_TRUE (event_data != NULL);

      status = ml_information_get (event_data, "data", &data);
      EXPECT_EQ (status, ML_ERROR_NONE);
      if (status != ML_ERROR_NONE)
        return;

      status = ml_tensors_data_get_tensor_data (data, 0U, &_raw, &_size);
      EXPECT_EQ (status, ML_ERROR_NONE);
      if (status != ML_ERROR_NONE)
        return;

      if (tdata) {
        if (tdata->received_tokens) {
          g_string_append_len (tdata->received_tokens, (const char *) _raw, _size);
        }
        tdata->token_count++;
      }
      g_print ("%.*s", (int) _size, (char *) _raw); // Print received token
      break;
    default:
      // Handle unknown or unimplemented events if necessary
      g_printerr ("Received unhandled LXM service event: %d\n", event);
      break;
  }
}

/**
 * @brief Internal function to run a full LXM session test.
 */
static void
_run_lxm_session_test (const gchar *config_path, const gchar *input_text, ml_option_h options)
{
  ml_lxm_session_h session = NULL;
  ml_lxm_prompt_h prompt = NULL;
  lxm_test_data_s tdata = { 0, NULL };
  int status;

  tdata.received_tokens = g_string_new ("");

  // 1. Create session with callback
  status = ml_lxm_session_create (config_path, NULL, _lxm_token_cb, &tdata, &session);
  ASSERT_EQ (status, ML_ERROR_NONE);
  ASSERT_TRUE (session != NULL);

  // 2. Create prompt
  status = ml_lxm_prompt_create (&prompt);
  ASSERT_EQ (status, ML_ERROR_NONE);
  ASSERT_TRUE (prompt != NULL);

  status = ml_lxm_prompt_append_text (prompt, input_text);
  ASSERT_EQ (status, ML_ERROR_NONE);

  // 3. Generate response (callback is already set during session creation)
  status = ml_lxm_session_respond (session, prompt, options);
  ASSERT_EQ (status, ML_ERROR_NONE);

  // Wait for the callback to receive data.
  // 10 seconds should be enough for a simple response.
  g_usleep (10000000U);

  // 4. Verify results
  EXPECT_GT (tdata.token_count, 0);
  EXPECT_GT (tdata.received_tokens->len, 0U);

  g_print ("\nReceived total tokens: %d\n", tdata.token_count);
  g_print ("Full received text: %s\n", tdata.received_tokens->str);

  // 5. Cleanup
  status = ml_lxm_prompt_destroy (prompt);
  EXPECT_EQ (status, ML_ERROR_NONE);

  status = ml_lxm_session_destroy (session);
  EXPECT_EQ (status, ML_ERROR_NONE);

  if (tdata.received_tokens) {
    g_string_free (tdata.received_tokens, TRUE);
  }
}

/**
 * @brief Test basic flow of LXM service.
 */
TEST (MLLxmService, basicFlow_p)
{
  skip_lxm_tc ("basicFlow_p");

  g_autofree gchar *config = get_config_path ("config_single_llamacpp.conf");
  ASSERT_TRUE (config != NULL);

  const gchar input_text[] = "Hello LXM, how are you?";
  ml_option_h options = NULL;
  int status;

  // Create options
  status = ml_option_create (&options);
  ASSERT_EQ (status, ML_ERROR_NONE);
  ASSERT_TRUE (options != NULL);

  // Set temperature option
  status = ml_option_set (options, "temperature", g_strdup_printf ("%f", 0.8), g_free);
  ASSERT_EQ (status, ML_ERROR_NONE);

  // Set max_tokens option
  status = ml_option_set (
      options, "max_tokens", g_strdup_printf ("%zu", (size_t) 32), g_free);
  ASSERT_EQ (status, ML_ERROR_NONE);

  _run_lxm_session_test (config, input_text, options);

  // Cleanup options
  ml_option_destroy (options);
}

/**
 * @brief Test LXM service with invalid parameters.
 */
TEST (MLLxmService, invalidParams_n)
{
  ml_lxm_session_h session = NULL;
  ml_lxm_prompt_h prompt = NULL;
  int status;
  ml_option_h options = NULL;
  g_autofree gchar *valid_config = get_config_path ("config_single_llamacpp.conf");

  // Create options for testing
  status = ml_option_create (&options);
  ASSERT_EQ (status, ML_ERROR_NONE);
  ml_option_set (options, "temperature", g_strdup_printf ("%f", 0.5), g_free);
  ml_option_set (options, "max_tokens", g_strdup_printf ("%zu", (size_t) 10), g_free);

  // ml_lxm_session_create
  status = ml_lxm_session_create (valid_config, NULL, NULL, NULL, NULL);
  EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
  status = ml_lxm_session_create (NULL, NULL, NULL, NULL, &session);
  EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);

  status = ml_lxm_session_create ("non_existent_config.conf", NULL, NULL, NULL, &session);
  EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);

  status = ml_lxm_session_create (valid_config, NULL, NULL, NULL, &session);
  EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);

  status = ml_lxm_session_create (valid_config, NULL, _lxm_token_cb, NULL, &session);
  if (status == ML_ERROR_NONE) {
    // ml_lxm_prompt_create
    status = ml_lxm_prompt_create (NULL);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);

    status = ml_lxm_prompt_create (&prompt);
    ASSERT_EQ (status, ML_ERROR_NONE);

    // ml_lxm_prompt_append_text
    status = ml_lxm_prompt_append_text (NULL, "text");
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
    status = ml_lxm_prompt_append_text (prompt, NULL);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);

    // ml_lxm_prompt_append_instruction
    status = ml_lxm_prompt_append_instruction (NULL, "instruction");
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
    status = ml_lxm_prompt_append_instruction (prompt, NULL);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);

    // ml_lxm_session_set_instructions
    status = ml_lxm_session_set_instructions (NULL, "new instructions");
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
    status = ml_lxm_session_set_instructions (session, NULL);
    EXPECT_EQ (status, ML_ERROR_NONE);
    status = ml_lxm_session_set_instructions (session, "new instructions");
    EXPECT_EQ (status, ML_ERROR_NONE);

    // ml_lxm_session_respond - callback is already set during session creation
    status = ml_lxm_session_respond (NULL, prompt, options);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
    status = ml_lxm_session_respond (session, NULL, options);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);

    // Now ml_lxm_session_respond should succeed with valid parameters
    status = ml_lxm_session_respond (session, prompt, options);
    EXPECT_EQ (status, ML_ERROR_NONE);

    // ml_lxm_prompt_destroy
    status = ml_lxm_prompt_destroy (NULL);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
    status = ml_lxm_prompt_destroy (prompt);
    EXPECT_EQ (status, ML_ERROR_NONE);
    prompt = NULL;

    // ml_lxm_session_destroy
    status = ml_lxm_session_destroy (NULL);
    EXPECT_EQ (status, ML_ERROR_INVALID_PARAMETER);
    status = ml_lxm_session_destroy (session);
    EXPECT_EQ (status, ML_ERROR_NONE);
    session = NULL;
  } else {
    g_print ("Skipping part of invalidParams_n as session creation failed (possibly due to missing models/config).\n");
  }

  // Cleanup options
  ml_option_destroy (options);
}

/**
 * @brief Main function to run the test.
 */
int
main (int argc, char **argv)
{
  int result = -1;

  try {
    testing::InitGoogleTest (&argc, argv);
  } catch (...) {
    g_warning ("catch 'testing::internal::<unnamed>::ClassUniqueToAlwaysTrue'");
  }

  /* ignore tizen feature status while running the testcases */
  set_feature_state (ML_FEATURE, SUPPORTED);
  set_feature_state (ML_FEATURE_INFERENCE, SUPPORTED);
  set_feature_state (ML_FEATURE_SERVICE, SUPPORTED);

  try {
    result = RUN_ALL_TESTS ();
  } catch (...) {
    g_warning ("catch `testing::internal::GoogleTestFailureException`");
  }

  set_feature_state (ML_FEATURE, NOT_CHECKED_YET);
  set_feature_state (ML_FEATURE_INFERENCE, NOT_CHECKED_YET);
  set_feature_state (ML_FEATURE_SERVICE, NOT_CHECKED_YET);

  return result;
}
#endif /* ENABLE_LLAMACPP */
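
For readers who want the API flow without the gtest scaffolding, the positive path exercised by basicFlow_p reduces to the call sequence below. This is a condensed sketch rather than part of the commit: it reuses only functions that appear in the diff above, hard-codes an illustrative config path instead of calling get_config_path (), omits error checking, and keeps the crude 10-second wait from the test.

/* Condensed sketch (not part of the commit): the call sequence exercised by basicFlow_p. */
#include <glib.h>
#include <ml-api-common.h>
#include <ml-api-service.h>
#include "ml-lxm-service-internal.h"

/* Streaming callback: print each generated token (same event contract as _lxm_token_cb above). */
static void
token_cb (ml_service_event_e event, ml_information_h event_data, void *user_data)
{
  (void) user_data; /* unused in this sketch */
  if (event == ML_SERVICE_EVENT_NEW_DATA) {
    ml_tensors_data_h data = NULL;
    void *raw = NULL;
    size_t size = 0;
    if (ml_information_get (event_data, "data", &data) == ML_ERROR_NONE
        && ml_tensors_data_get_tensor_data (data, 0U, &raw, &size) == ML_ERROR_NONE)
      g_print ("%.*s", (int) size, (char *) raw);
  }
}

int
main (void)
{
  ml_lxm_session_h session = NULL;
  ml_lxm_prompt_h prompt = NULL;
  ml_option_h options = NULL;

  /* Generation options are plain string key/values, as in the test. */
  ml_option_create (&options);
  ml_option_set (options, "temperature", g_strdup ("0.8"), g_free);
  ml_option_set (options, "max_tokens", g_strdup ("32"), g_free);

  /* 1. Create a session from a llamacpp single-model config; tokens arrive via the callback. */
  ml_lxm_session_create ("config_single_llamacpp.conf", NULL, token_cb, NULL, &session);

  /* 2. Build a prompt and request a response (tokens stream asynchronously to token_cb). */
  ml_lxm_prompt_create (&prompt);
  ml_lxm_prompt_append_text (prompt, "Hello LXM, how are you?");
  ml_lxm_session_respond (session, prompt, options);
  g_usleep (10000000U); /* crude wait, mirroring the test */

  /* 3. Cleanup. */
  ml_lxm_prompt_destroy (prompt);
  ml_lxm_session_destroy (session);
  ml_option_destroy (options);
  return 0;
}
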

0 commit comments
