From 9aebb076f54263bd075fc706d53f6c6d4f2191c4 Mon Sep 17 00:00:00 2001
From: Major Hayden
Date: Tue, 16 Dec 2025 14:45:15 -0600
Subject: [PATCH] test(rlsapi): add initial e2e test for v1 infer endpoint

Add a basic smoke test for the RLSAPI v1 infer endpoint to verify that
the endpoint is reachable and returns a valid JSON response.

Signed-off-by: Major Hayden
---
 .github/workflows/e2e_tests.yaml              | 34 ++++++++++++++++---
 docker-compose-library.yaml                   |  3 ++
 docker-compose.yaml                           |  3 ++
 .../library-mode/lightspeed-stack-rlsapi.yaml | 24 +++++++++++++
 .../library-mode/lightspeed-stack.yaml        |  2 +-
 .../server-mode/lightspeed-stack-rlsapi.yaml  | 25 ++++++++++++++
 .../server-mode/lightspeed-stack.yaml         |  2 +-
 tests/e2e/features/environment.py             | 14 ++++++++
 tests/e2e/features/rlsapi_v1.feature          | 16 +++++++++
 tests/e2e/test_list.txt                       |  1 +
 10 files changed, 117 insertions(+), 7 deletions(-)
 create mode 100644 tests/e2e/configuration/library-mode/lightspeed-stack-rlsapi.yaml
 create mode 100644 tests/e2e/configuration/server-mode/lightspeed-stack-rlsapi.yaml
 create mode 100644 tests/e2e/features/rlsapi_v1.feature

diff --git a/.github/workflows/e2e_tests.yaml b/.github/workflows/e2e_tests.yaml
index a80de744a..058d9caed 100644
--- a/.github/workflows/e2e_tests.yaml
+++ b/.github/workflows/e2e_tests.yaml
@@ -135,9 +135,9 @@ jobs:
         run: |
           CONFIGS_DIR="tests/e2e/configs"
           ENVIRONMENT="$CONFIG_ENVIRONMENT"
-          
+
           echo "Looking for configurations in $CONFIGS_DIR/"
-          
+
           # List available configurations
           if [ -d "$CONFIGS_DIR" ]; then
             echo "Available configurations:"
@@ -146,12 +146,12 @@ jobs:
             echo "Configs directory '$CONFIGS_DIR' not found!"
             exit 1
           fi
-          
+
           # Determine which config file to use
           CONFIG_FILE="$CONFIGS_DIR/run-$ENVIRONMENT.yaml"
-          
+
           echo "Looking for: $CONFIG_FILE"
-          
+
           if [ -f "$CONFIG_FILE" ]; then
             echo "✅ Found config for environment: $ENVIRONMENT"
             cp "$CONFIG_FILE" run.yaml
@@ -163,6 +163,30 @@ jobs:
             exit 1
           fi
 
+      - name: Set default model for rlsapi v1 tests
+        run: |
+          # Set default model/provider for rlsapi v1 endpoint based on environment
+          case "${{ matrix.environment }}" in
+            ci)
+              echo "E2E_DEFAULT_PROVIDER=openai" >> $GITHUB_ENV
+              echo "E2E_DEFAULT_MODEL=gpt-4o-mini" >> $GITHUB_ENV
+              ;;
+            azure)
+              echo "E2E_DEFAULT_PROVIDER=azure" >> $GITHUB_ENV
+              echo "E2E_DEFAULT_MODEL=gpt-4o-mini" >> $GITHUB_ENV
+              ;;
+            vertexai)
+              echo "E2E_DEFAULT_PROVIDER=google-vertex" >> $GITHUB_ENV
+              echo "E2E_DEFAULT_MODEL=gemini-2.0-flash-exp" >> $GITHUB_ENV
+              ;;
+            *)
+              echo "⚠️ Unknown environment: ${{ matrix.environment }}, using defaults"
+              echo "E2E_DEFAULT_PROVIDER=openai" >> $GITHUB_ENV
+              echo "E2E_DEFAULT_MODEL=gpt-4o-mini" >> $GITHUB_ENV
+              ;;
+          esac
+          echo "✅ Wrote E2E_DEFAULT_PROVIDER and E2E_DEFAULT_MODEL to GITHUB_ENV for later steps"
+
       - name: Show final configuration
         run: |
           echo "=== Configuration Summary ==="
diff --git a/docker-compose-library.yaml b/docker-compose-library.yaml
index 4733d5d6c..f2c96a0e1 100644
--- a/docker-compose-library.yaml
+++ b/docker-compose-library.yaml
@@ -19,6 +19,9 @@ services:
       # OpenAI
       - OPENAI_API_KEY=${OPENAI_API_KEY}
      - E2E_OPENAI_MODEL=${E2E_OPENAI_MODEL:-gpt-4-turbo}
+      # Default model for rlsapi v1 tests
+      - E2E_DEFAULT_PROVIDER=${E2E_DEFAULT_PROVIDER:-openai}
+      - E2E_DEFAULT_MODEL=${E2E_DEFAULT_MODEL:-gpt-4o-mini}
       # Azure
       - AZURE_API_KEY=${AZURE_API_KEY:-}
       # RHAIIS
diff --git a/docker-compose.yaml b/docker-compose.yaml
index 3b00c3815..c122724d1 100644
--- a/docker-compose.yaml
+++ b/docker-compose.yaml
@@ -55,6 +55,9 @@ services:
     environment:
       - OPENAI_API_KEY=${OPENAI_API_KEY}
       - AZURE_API_KEY=${AZURE_API_KEY}
+      # Default model for rlsapi v1 tests
+      - E2E_DEFAULT_PROVIDER=${E2E_DEFAULT_PROVIDER:-openai}
+      - E2E_DEFAULT_MODEL=${E2E_DEFAULT_MODEL:-gpt-4o-mini}
     depends_on:
       llama-stack:
         condition: service_healthy
diff --git a/tests/e2e/configuration/library-mode/lightspeed-stack-rlsapi.yaml b/tests/e2e/configuration/library-mode/lightspeed-stack-rlsapi.yaml
new file mode 100644
index 000000000..55471186a
--- /dev/null
+++ b/tests/e2e/configuration/library-mode/lightspeed-stack-rlsapi.yaml
@@ -0,0 +1,24 @@
+name: Lightspeed Core Service (LCS)
+service:
+  host: 0.0.0.0
+  port: 8080
+  auth_enabled: false
+  workers: 1
+  color_log: true
+  access_log: true
+llama_stack:
+  # Library mode - embeds llama-stack as a library
+  use_as_library_client: true
+  library_client_config_path: run.yaml
+user_data_collection:
+  feedback_enabled: true
+  feedback_storage: "/tmp/data/feedback"
+  transcripts_enabled: true
+  transcripts_storage: "/tmp/data/transcripts"
+authentication:
+  module: "noop"
+inference:
+  # Configure the default model/provider for the rlsapi v1 endpoint.
+  # These are set per-environment in the CI workflow.
+  default_provider: ${env.E2E_DEFAULT_PROVIDER:=openai}
+  default_model: ${env.E2E_DEFAULT_MODEL:=gpt-4o-mini}
diff --git a/tests/e2e/configuration/library-mode/lightspeed-stack.yaml b/tests/e2e/configuration/library-mode/lightspeed-stack.yaml
index e6d02d3a6..47257bfb1 100644
--- a/tests/e2e/configuration/library-mode/lightspeed-stack.yaml
+++ b/tests/e2e/configuration/library-mode/lightspeed-stack.yaml
@@ -16,4 +16,4 @@ user_data_collection:
   transcripts_enabled: true
   transcripts_storage: "/tmp/data/transcripts"
 authentication:
-  module: "noop"
\ No newline at end of file
+  module: "noop"
diff --git a/tests/e2e/configuration/server-mode/lightspeed-stack-rlsapi.yaml b/tests/e2e/configuration/server-mode/lightspeed-stack-rlsapi.yaml
new file mode 100644
index 000000000..e57362b5c
--- /dev/null
+++ b/tests/e2e/configuration/server-mode/lightspeed-stack-rlsapi.yaml
@@ -0,0 +1,25 @@
+name: Lightspeed Core Service (LCS)
+service:
+  host: 0.0.0.0
+  port: 8080
+  auth_enabled: false
+  workers: 1
+  color_log: true
+  access_log: true
+llama_stack:
+  # Server mode - connects to a separate llama-stack service
+  use_as_library_client: false
+  url: http://llama-stack:8321
+  api_key: xyzzy
+user_data_collection:
+  feedback_enabled: true
+  feedback_storage: "/tmp/data/feedback"
+  transcripts_enabled: true
+  transcripts_storage: "/tmp/data/transcripts"
+authentication:
+  module: "noop"
+inference:
+  # Configure the default model/provider for the rlsapi v1 endpoint.
+  # These are set per-environment in the CI workflow.
+  default_provider: ${env.E2E_DEFAULT_PROVIDER:=openai}
+  default_model: ${env.E2E_DEFAULT_MODEL:=gpt-4o-mini}
diff --git a/tests/e2e/configuration/server-mode/lightspeed-stack.yaml b/tests/e2e/configuration/server-mode/lightspeed-stack.yaml
index adc5b4829..cc699ba89 100644
--- a/tests/e2e/configuration/server-mode/lightspeed-stack.yaml
+++ b/tests/e2e/configuration/server-mode/lightspeed-stack.yaml
@@ -17,4 +17,4 @@ user_data_collection:
   transcripts_enabled: true
   transcripts_storage: "/tmp/data/transcripts"
 authentication:
-  module: "noop"
\ No newline at end of file
+  module: "noop"
diff --git a/tests/e2e/features/environment.py b/tests/e2e/features/environment.py
index 09b7feeff..3f817c22a 100644
--- a/tests/e2e/features/environment.py
+++ b/tests/e2e/features/environment.py
@@ -171,6 +171,15 @@ def before_feature(context: Context, feature: Feature) -> None:
         switch_config(context.feature_config)
         restart_container("lightspeed-stack")
 
+    if "RlsapiConfig" in feature.tags:
+        mode_dir = "library-mode" if context.is_library_mode else "server-mode"
+        context.feature_config = (
+            f"tests/e2e/configuration/{mode_dir}/lightspeed-stack-rlsapi.yaml"
+        )
+        context.default_config_backup = create_config_backup("lightspeed-stack.yaml")
+        switch_config(context.feature_config)
+        restart_container("lightspeed-stack")
+
     if "Feedback" in feature.tags:
         context.hostname = os.getenv("E2E_LSC_HOSTNAME", "localhost")
         context.port = os.getenv("E2E_LSC_PORT", "8080")
@@ -184,6 +193,11 @@ def after_feature(context: Context, feature: Feature) -> None:
         restart_container("lightspeed-stack")
         remove_config_backup(context.default_config_backup)
 
+    if "RlsapiConfig" in feature.tags:
+        switch_config(context.default_config_backup)
+        restart_container("lightspeed-stack")
+        remove_config_backup(context.default_config_backup)
+
     if "Feedback" in feature.tags:
         for conversation_id in context.feedback_conversations:
             url = f"http://{context.hostname}:{context.port}/v1/conversations/{conversation_id}"
diff --git a/tests/e2e/features/rlsapi_v1.feature b/tests/e2e/features/rlsapi_v1.feature
new file mode 100644
index 000000000..30045a0a8
--- /dev/null
+++ b/tests/e2e/features/rlsapi_v1.feature
@@ -0,0 +1,16 @@
+@RlsapiConfig
+Feature: RLSAPI v1 infer endpoint
+  Basic tests for the RLSAPI v1 inference endpoint.
+
+  Background:
+    Given The service is started locally
+    And REST API service prefix is /v1
+
+  Scenario: Verify RLSAPI v1 infer endpoint returns 200
+    Given The system is in default state
+    When I access REST API endpoint "infer" using HTTP POST method
+      """
+      {"question": "Say hello"}
+      """
+    Then The status code of the response is 200
+    And Content type of response should be set to "application/json"
diff --git a/tests/e2e/test_list.txt b/tests/e2e/test_list.txt
index 2a62eaf6c..859483300 100644
--- a/tests/e2e/test_list.txt
+++ b/tests/e2e/test_list.txt
@@ -9,3 +9,4 @@ features/info.feature
 features/query.feature
 features/streaming_query.feature
 features/rest_api.feature
+features/rlsapi_v1.feature
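
-- 
Reviewer note (kept below the "-- " trailer so it is not part of the commit):
the new scenario amounts to a single POST with a JSON body, so it can also be
smoke-tested by hand. This is a minimal sketch assuming the defaults used
above — service on localhost:8080 and the infer endpoint under the /v1
prefix; adjust host, port, or path if your deployment differs.

    # Manual equivalent of the rlsapi_v1.feature scenario (assumed URL layout):
    # expect HTTP 200 and a JSON content type in the response.
    curl -sS -X POST "http://localhost:8080/v1/infer" \
      -H "Content-Type: application/json" \
      -d '{"question": "Say hello"}' \
      -w '\nstatus=%{http_code} content-type=%{content_type}\n'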