Skip to content

[Genesis] Add JS Release Test #430

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 6 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
158 changes: 158 additions & 0 deletions .github/workflows/node-ec2-adot-genesis-test.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,158 @@
## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
## SPDX-License-Identifier: Apache-2.0

# E2E test for the ADOT Node auto-instrumentation "Genesis" (Gen AI) use case:
# deploys a LangChain sample service to EC2 via Terraform, waits for telemetry,
# then validates the emitted logs, traces, and metrics.
name: Node EC2 Genesis Use Case
on:
  workflow_call:
    inputs:
      caller-workflow-name:
        required: true
        type: string
      staging-instrumentation-name:
        required: false
        default: '@aws/aws-distro-opentelemetry-node-autoinstrumentation'
        type: string

permissions:
  id-token: write
  contents: read

env:
  E2E_TEST_AWS_REGION: 'us-west-2'
  E2E_TEST_ACCOUNT_ID: ${{ secrets.APPLICATION_SIGNALS_E2E_TEST_ACCOUNT_ID }}
  E2E_TEST_ROLE_NAME: ${{ secrets.APPLICATION_SIGNALS_E2E_TEST_ROLE_NAME }}
  # FIX: the original defined ADOT_WHEEL_NAME from a non-existent input
  # (inputs.staging-wheel-name) and never defined ADOT_INSTRUMENTATION_NAME,
  # even though later steps read env.ADOT_INSTRUMENTATION_NAME.
  ADOT_INSTRUMENTATION_NAME: ${{ inputs.staging-instrumentation-name }}
  METRIC_NAMESPACE: genesis
  LOG_GROUP_NAME: test/genesis
  TEST_RESOURCES_FOLDER: ${GITHUB_WORKSPACE}
  SAMPLE_APP_ZIP: s3://aws-appsignals-sample-app-prod-us-east-1/node-sample-app.zip

jobs:
  node-ec2-adot-genesis:
    runs-on: ubuntu-latest
    timeout-minutes: 30
    steps:
      # Test definitions (terraform, validator) live in the shared test
      # framework repository, not in the calling repository.
      - uses: actions/checkout@v4
        with:
          repository: 'aws-observability/aws-application-signals-test-framework'
          ref: ${{ inputs.caller-workflow-name == 'main-build' && 'main' || github.ref }}
          fetch-depth: 0

      # Staging builds install the instrumentation artifact from S3;
      # release tests install the published npm package directly.
      - name: Set Get ADOT Instrumentation command environment variable
        run: |
          if [ "${{ github.event.repository.name }}" = "aws-otel-js-instrumentation" ]; then
            echo GET_ADOT_INSTRUMENTATION_COMMAND="aws s3 cp s3://adot-autoinstrumentation-node-staging/${{ env.ADOT_INSTRUMENTATION_NAME }} ./${{ env.ADOT_INSTRUMENTATION_NAME }} --region us-east-1 && npm install ${{ env.ADOT_INSTRUMENTATION_NAME }}" >> $GITHUB_ENV
          else
            echo GET_ADOT_INSTRUMENTATION_COMMAND="npm install ${{ env.ADOT_INSTRUMENTATION_NAME }}" >> $GITHUB_ENV
          fi

      - name: Initiate Gradlew Daemon
        uses: ./.github/workflows/actions/execute_and_retry
        continue-on-error: true
        with:
          command: "./gradlew :validator:build"
          cleanup: "./gradlew clean"
          max_retry: 3
          sleep_time: 60

      # RANDOM suffix keeps concurrent runs of the same workflow from
      # colliding on Terraform resource names.
      - name: Generate testing id
        run: echo TESTING_ID="${{ github.run_id }}-${{ github.run_number }}-${RANDOM}" >> $GITHUB_ENV

      # Build one trace id in both formats: X-Ray (1-<epoch>-<rand>) for the
      # trace validator and W3C (<epoch><rand>) for the log validator.
      - name: Generate XRay and W3C trace ID
        run: |
          ID_1="$(printf '%08x' $(date +%s))"
          ID_2="$(openssl rand -hex 12)"
          W3C_TRACE_ID="${ID_1}${ID_2}"
          XRAY_TRACE_ID="1-${ID_1}-${ID_2}"
          PARENT_ID="$(openssl rand -hex 8)"
          TRACE_ID_HEADER="Root=${XRAY_TRACE_ID};Parent=${PARENT_ID};Sampled=1"
          echo "XRAY_TRACE_ID=${XRAY_TRACE_ID}" >> $GITHUB_ENV
          echo "W3C_TRACE_ID=${W3C_TRACE_ID}" >> $GITHUB_ENV
          echo "TRACE_ID_HEADER=${TRACE_ID_HEADER}" >> $GITHUB_ENV
          echo "Generated XRay Trace ID: ${XRAY_TRACE_ID}"
          echo "Generated W3C Trace ID: ${W3C_TRACE_ID}"
          echo "Generated Trace ID Header: ${TRACE_ID_HEADER}"

      - name: Configure AWS Credentials
        uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ env.E2E_TEST_ACCOUNT_ID }}:role/${{ env.E2E_TEST_ROLE_NAME }}
          aws-region: ${{ env.E2E_TEST_AWS_REGION }}

      - name: Set up terraform
        uses: ./.github/workflows/actions/execute_and_retry
        with:
          command: "wget -O- https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg"
          post-command: 'echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
            && sudo apt update && sudo apt install terraform'

      - name: Initiate Terraform
        uses: ./.github/workflows/actions/execute_and_retry
        with:
          command: "cd ${{ env.TEST_RESOURCES_FOLDER }}/terraform/node/ec2/adot-genai && terraform init && terraform validate"
          cleanup: "rm -rf .terraform && rm -rf .terraform.lock.hcl"
          max_retry: 6

      - name: Deploy service via terraform
        working-directory: terraform/node/ec2/adot-genai
        run: |
          terraform apply -auto-approve \
            -var="aws_region=${{ env.E2E_TEST_AWS_REGION }}" \
            -var="test_id=${{ env.TESTING_ID }}" \
            -var="service_zip_url=${{ env.SAMPLE_APP_ZIP }}" \
            -var="trace_id=${{ env.TRACE_ID_HEADER }}" \
            -var="get_adot_wheel_command=${{ env.GET_ADOT_INSTRUMENTATION_COMMAND }}"

      - name: Get deployment info
        working-directory: terraform/node/ec2/adot-genai
        run: |
          echo "INSTANCE_IP=$(terraform output langchain_service_public_ip)" >> $GITHUB_ENV
          echo "INSTANCE_ID=$(terraform output langchain_service_instance_id)" >> $GITHUB_ENV

      - name: Waiting 5 Minutes for Gen AI service to be ready and emit logs, traces, and metrics
        run: sleep 300

      - name: Validate generated logs
        run: ./gradlew validator:run --args='-c node/ec2/adot-genai/log-validation.yml
          --testing-id ${{ env.TESTING_ID }}
          --endpoint http://${{ env.INSTANCE_IP }}:8000
          --region ${{ env.E2E_TEST_AWS_REGION }}
          --metric-namespace ${{ env.METRIC_NAMESPACE }}
          --log-group ${{ env.LOG_GROUP_NAME }}
          --service-name langchain-traceloop-app
          --instance-id ${{ env.INSTANCE_ID }}
          --trace-id ${{ env.W3C_TRACE_ID }}'

      # Run the remaining validations even if an earlier one failed, so one
      # failing signal type does not mask the status of the others.
      - name: Validate generated traces
        if: (success() || failure()) && !cancelled()
        run: ./gradlew validator:run --args='-c node/ec2/adot-genai/trace-validation.yml
          --testing-id ${{ env.TESTING_ID }}
          --endpoint http://${{ env.INSTANCE_IP }}:8000
          --region ${{ env.E2E_TEST_AWS_REGION }}
          --metric-namespace ${{ env.METRIC_NAMESPACE }}
          --service-name langchain-traceloop-app
          --instance-id ${{ env.INSTANCE_ID }}
          --trace-id ${{ env.XRAY_TRACE_ID }}'

      - name: Validate generated metrics
        if: (success() || failure()) && !cancelled()
        run: ./gradlew validator:run --args='-c node/ec2/adot-genai/metric-validation.yml
          --testing-id ${{ env.TESTING_ID }}
          --endpoint http://${{ env.INSTANCE_IP }}:8000
          --region ${{ env.E2E_TEST_AWS_REGION }}
          --metric-namespace ${{ env.METRIC_NAMESPACE }}
          --log-group ${{ env.LOG_GROUP_NAME }}
          --service-name langchain-traceloop-app
          --instance-id ${{ env.INSTANCE_ID }}'

      # Always tear down, even on failure, so test resources do not leak.
      # FIX: the original passed env.GET_ADOT_WHEEL_COMMAND here (never set),
      # while the apply step used GET_ADOT_INSTRUMENTATION_COMMAND — the
      # destroy would have run with a mismatched (empty) variable value.
      - name: Cleanup
        if: always()
        continue-on-error: true
        working-directory: terraform/node/ec2/adot-genai
        run: |
          terraform destroy -auto-approve \
            -var="aws_region=${{ env.E2E_TEST_AWS_REGION }}" \
            -var="test_id=${{ env.TESTING_ID }}" \
            -var="service_zip_url=${{ env.SAMPLE_APP_ZIP }}" \
            -var="trace_id=${{ env.TRACE_ID_HEADER }}" \
            -var="get_adot_wheel_command=${{ env.GET_ADOT_INSTRUMENTATION_COMMAND }}"
2 changes: 1 addition & 1 deletion .github/workflows/python-ec2-genesis-test.yml
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
## Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
## SPDX-License-Identifier: Apache-2.0

name: Python EC2 Gen AI Use Case
name: Python EC2 Genesis Use Case
on:
workflow_dispatch: # be able to run the workflow on demand

Expand Down
28 changes: 28 additions & 0 deletions sample-apps/node/genai-service/customInstrumentation.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
// Registers Traceloop's LangChain instrumentation for the Node sample app.
// Intended to be loaded before the application code (e.g. via --require)
// so the LangChain modules are patched before their first use.
const { trace } = require('@opentelemetry/api');
const { registerInstrumentations } = require('@opentelemetry/instrumentation');
const { HttpInstrumentation } = require('@opentelemetry/instrumentation-http');
const tracerProvider = trace.getTracerProvider();
const {
  LangChainInstrumentation,
} = require('@traceloop/instrumentation-langchain');

// NOTE(review): registerInstrumentations, HttpInstrumentation,
// LangChainInstrumentation, and tracerProvider above are never used in this
// file — confirm whether they are needed or can be removed.

// LangChain modules handed to Traceloop so it can patch them in place.
const AgentsModule = require('langchain/agents');
const ChainsModule = require('langchain/chains');
const RunnableModule = require('@langchain/core/runnables');
const ToolsModule = require('@langchain/core/tools');
const VectorStoresModule = require('@langchain/core/vectorstores');

const traceloop = require('@traceloop/node-server-sdk');

// Initialize the Traceloop SDK, passing the LangChain modules explicitly via
// instrumentModules so they are instrumented even when automatic module
// detection would miss them.
traceloop.initialize({
  appName: 'myTestApp',
  instrumentModules: {
    langchain: {
      runnablesModule: RunnableModule,
      toolsModule: ToolsModule,
      chainsModule: ChainsModule,
      agentsModule: AgentsModule,
      vectorStoreModule: VectorStoresModule,
    },
  },
});
60 changes: 60 additions & 0 deletions sample-apps/node/genai-service/index.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
// GenAI sample service: a minimal Express app that answers chat prompts via
// a Bedrock-hosted Claude model, used by the Genesis E2E workflow to
// generate logs, traces, and metrics.
const express = require('express');
const { BedrockChat } = require('@langchain/community/chat_models/bedrock');
const { ChatPromptTemplate } = require('@langchain/core/prompts');
const traceloop = require('@traceloop/node-server-sdk');
const logger = require('pino')();

const app = express();
app.use(express.json());
// Port is overridable for test environments; defaults to 8000.
const PORT = parseInt(process.env.SAMPLE_APP_PORT || '8000', 10);

const llm = new BedrockChat({
  model: "anthropic.claude-3-sonnet-20240229-v1:0",
  region: "us-east-1",
  temperature: 0.7,
});

// Prompt template with a single declared placeholder: {input}.
const prompt = ChatPromptTemplate.fromMessages([
  [
    "system",
    "You are a helpful assistant. Provide a helpful response to the following user input.",
  ],
  ["human", "{input}"],
]);

const chain = prompt.pipe(llm);

// Liveness probe used by the E2E validators.
app.get('/health', (req, res) => {
  res.json({ status: 'healthy' });
});

// Chat endpoint: expects JSON body { "message": "..." }; returns the model's
// reply as { "response": "..." }.
app.post('/ai-chat', async (req, res) => {
  const { message } = req.body;

  if (!message) {
    return res.status(400).json({ error: 'Message is required' });
  }

  try {
    logger.info(`Question asked: ${message}`);

    // Wrap the chain call in Traceloop workflow/task spans so the E2E test
    // can validate the resulting trace structure.
    const response = await traceloop.withWorkflow({ name: "sample_chat" }, () => {
      return traceloop.withTask({ name: "parent_task" }, () => {
        // FIX: the original also passed input_language/output_language, but
        // the prompt template above declares only {input} — they were dead
        // inputs and have been dropped.
        return chain.invoke({ input: message });
      });
    });

    res.json({ response: response.content });
  } catch (error) {
    // Return a generic 500 to the caller; details go to the log stream only.
    logger.error(`Error processing request: ${error.message}`);
    res.status(500).json({ error: 'Internal server error' });
  }
});

app.listen(PORT, () => {
  logger.info(`GenAI service listening on port ${PORT}`);
});
Loading
Loading