From b75268a47bcccb74064be7e7cbda1ca820ca5a74 Mon Sep 17 00:00:00 2001 From: VISHAL KUMAR Date: Fri, 16 May 2025 19:33:14 +0530 Subject: [PATCH 01/14] ci: Add email checker Integrate the Qualcomm Commit Emails Check Action to validate the commit's author and committer email addresses - https://github.com/qualcomm/commit-emails-check-action Signed-off-by: VISHAL KUMAR --- .github/workflows/email_checker.yml | 10 ++++++++++ 1 file changed, 10 insertions(+) create mode 100644 .github/workflows/email_checker.yml diff --git a/.github/workflows/email_checker.yml b/.github/workflows/email_checker.yml new file mode 100644 index 0000000000000..8f7cff5953031 --- /dev/null +++ b/.github/workflows/email_checker.yml @@ -0,0 +1,10 @@ +name: PR email addresses checker + +on: pull_request + +jobs: + pr-check-emails: + runs-on: ubuntu-latest + steps: + - name: Check PR emails + uses: qualcomm/commit-emails-check-action@main From 69413bbcebd9b0ed7c4fe00de49b3443df72f4a7 Mon Sep 17 00:00:00 2001 From: VISHAL KUMAR Date: Wed, 21 May 2025 21:11:24 +0530 Subject: [PATCH 02/14] Enable email checker on qcom-next-staging branch Need email checker on qcom-next-staging branch only Co-authored-by: Nasser Grainawi Signed-off-by: VISHAL KUMAR --- .github/workflows/email_checker.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/email_checker.yml b/.github/workflows/email_checker.yml index 8f7cff5953031..dcbb8ad9e098a 100644 --- a/.github/workflows/email_checker.yml +++ b/.github/workflows/email_checker.yml @@ -1,6 +1,8 @@ name: PR email addresses checker -on: pull_request +on: + pull_request: + branches: [ qcom-next-staging ] jobs: pr-check-emails: From 202f487bf471ab166b4ed1b0e1a5c26d8ce09874 Mon Sep 17 00:00:00 2001 From: VISHAL KUMAR Date: Fri, 23 May 2025 12:10:21 +0530 Subject: [PATCH 03/14] ci: Add kernel checkers (#5) Signed-off-by: Vishal Kumar --- .github/workflows/kernel_checkers.yml | 36 +++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100644 .github/workflows/kernel_checkers.yml diff --git a/.github/workflows/kernel_checkers.yml b/.github/workflows/kernel_checkers.yml new file mode 100644 index 0000000000000..4905090bf473e --- /dev/null +++ b/.github/workflows/kernel_checkers.yml @@ -0,0 +1,36 @@ +name: Kernel Checkers +on: + pull_request: + branches: + - qcom-next-staging + +jobs: + prepare: + runs-on: + group: GHA-Kernel-SelfHosted-RG + labels: [self-hosted, kernel-prd-u2404-x64-large-od-ephem] + steps: + - name: Checkout PR Code + uses: actions/checkout@v4 + with: + fetch-depth: 0 + + outputs: + kernel_src: ${{ github.workspace }} + base_sha: ${{ github.event.pull_request.base.sha }} + head_sha: ${{ github.event.pull_request.head.sha }} + + checker: + needs: prepare + uses: qualcomm-linux/kernel-checkers/.github/workflows/checker.yml@main + with: + check_name: ${{ matrix.check }} + kernel_src: ${{ needs.prepare.outputs.kernel_src }} + base_sha: ${{ needs.prepare.outputs.base_sha }} + head_sha: ${{ needs.prepare.outputs.head_sha }} + + strategy: + matrix: + check: [check-uapi-headers, sparse-check, checkpatch, + dt-binding-check, dtb-check] + fail-fast: false From ecff6bf6c2c735d389a13ce5270c4d00c8d4979a Mon Sep 17 00:00:00 2001 From: VISHAL KUMAR Date: Fri, 23 May 2025 13:05:28 +0530 Subject: [PATCH 04/14] ci: Add initial GitHub Actions for kernel CI pipeline. (#6) Includes the following components: 1. 
aws_s3_helper (composite action): - Supports three modes: single-upload, multi-upload, and download - Uploads files to S3 with dynamic paths based on GitHub context - Generates pre-signed URLs for uploaded files - Outputs a single URL or a JSON file of URLs depending on mode - Uploads presigned_urls.json as an artifact in multi-upload mode 2. pull_docker_image (composite action): - Clone kmake-image repository - Build the docker image using kmake-image Dockerfile 3. build (workflow): - Uses an AWS runner to build kernel artifacts - Uses pull_docker_image to get docker image - Builds the kernel using a reusable `build` action - Packages and lists key artifacts (Image, vmlinux, dtb, modules) - Uploads artifacts to S3 using `aws_s3_helper` - Cleans up workspace and appends a build summary 4. build_workspace (composite action): - Downloads required dependencies (ramdisk, systemd-boot) - Builds the kernel using Docker and outputs modules - Packages DLKM into the downloaded ramdisk 5. test_action (composite action): - Parses presigned_urls.json to extract artifact URLs - Updates metadata.json and cloudData.json using Docker + jq - Uploads updated metadata to S3 and injects its URL into cloudData - Adds firmware and ramdisk URLs via pre-signed S3 links - Generates a LAVA job definition using a Python script, qualcomm_linux/job_render repository 6. test (workflow): - Uses pull_docker_image to get docker image - Download artifact URLs list generated by build workflow - Triggers the LAVA job using job definition - Gets the LAVA job results This commit establishes a basic docker-based CI pipeline for kernel builds and automated artifact handling via AWS S3. Signed-off-by: Vishal Kumar --- .github/actions/aws_s3_helper/action.yml | 90 +++++++++++ .github/actions/build/action.yml | 37 +++++ .github/actions/lava_job_render/action.yml | 154 +++++++++++++++++++ .github/actions/pull_docker_image/action.yml | 24 +++ .github/workflows/build.yml | 75 +++++++++ .github/workflows/pre_merge.yml | 22 +++ .github/workflows/test.yml | 98 ++++++++++++ 7 files changed, 500 insertions(+) create mode 100644 .github/actions/aws_s3_helper/action.yml create mode 100644 .github/actions/build/action.yml create mode 100644 .github/actions/lava_job_render/action.yml create mode 100644 .github/actions/pull_docker_image/action.yml create mode 100644 .github/workflows/build.yml create mode 100644 .github/workflows/pre_merge.yml create mode 100644 .github/workflows/test.yml diff --git a/.github/actions/aws_s3_helper/action.yml b/.github/actions/aws_s3_helper/action.yml new file mode 100644 index 0000000000000..6ae81d7c5da1d --- /dev/null +++ b/.github/actions/aws_s3_helper/action.yml @@ -0,0 +1,90 @@ +name: AWS S3 Helper +description: Upload and download files from AWS S3 + +inputs: + s3_bucket: + description: S3 Bucket Name + required: true + local_file: + description: Local file paths + required: false + default: ../artifacts/file_list.txt + download_file: + description: Download file paths + required: false + default: '' + mode: + description: Mode of operation (upload/download) + required: true + default: single-upload + +outputs: + presigned_url: + description: Pre-signed URL for the uploaded file + value: ${{ steps.sync-data.outputs.presigned_url }} + +runs: + using: "composite" + steps: + - name: Sync Data + id: sync-data + shell: bash + env: + UPLOAD_LOCATION: ${{ github.repository_owner }}/${{ github.event.repository.name }}/${{ github.workflow }}/${{ github.head_ref != '' && github.head_ref || github.run_id }}/ + run: | 
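+          # Modes: multi-upload reads paths from inputs.local_file and collects
+          # pre-signed URLs into presigned_urls.json; single-upload uploads one
+          # file and exposes its pre-signed URL via the presigned_url output;
+          # download syncs inputs.download_file from the bucket.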
+ echo "::group::$(printf '__________ %-100s' 'Process' | tr ' ' _)" + case "${{ inputs.mode }}" in + multi-upload) + echo "Uploading files to S3 bucket..." + first_line=true + # Start the JSON object + echo "{" > ${{ github.workspace }}/presigned_urls.json + while IFS= read -r file; do + if [ -f "$file" ]; then + echo "Uploading $file..." + aws s3 cp "$file" s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }} + echo "Uploaded $file to s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}" + echo "Creating Pre-signed URL for $file..." + filename=$(basename "$file") + presigned_url=$(aws s3 presign s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}$filename --expires-in 3600) + if [ "$first_line" = true ]; then + first_line=false + else + echo "," >> ${{ github.workspace }}/presigned_urls.json + fi + # Append the pre-signed URL to the file + echo " \"${file}\": \"${presigned_url}\"" >> ${{ github.workspace }}/presigned_urls.json + echo "Pre-signed URL for $file: $presigned_url" + else + echo "Warning: $file does not exist or is not a regular file." + fi + done < "${{ inputs.local_file }}" + # Close the JSON object + echo "}" >> ${{ github.workspace }}/presigned_urls.json + ;; + single-upload) + echo "Uploading single file to S3 bucket..." + aws s3 cp "${{ inputs.local_file }}" s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }} + echo "Uploaded ${{ inputs.local_file }} to s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}" + echo "Creating Pre-signed URL for ${{ inputs.local_file }}..." + presigned_url=$(aws s3 presign s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}${{ inputs.local_file }} --expires-in 3600) + echo "presigned_url=${presigned_url}" >> "$GITHUB_OUTPUT" + ;; + download) + #Download The required file from s3 + echo "Downloading files from S3 bucket..." + aws s3 sync s3://${{ inputs.s3_bucket }}/${{ inputs.download_file }} . + ;; + *) + echo "Invalid mode. Use 'upload' or 'download'." 
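+            # The modes this script accepts are 'single-upload', 'multi-upload'
+            # and 'download'.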
+ exit 1 + ;; + esac + + - name: Upload artifacts + if: ${{ inputs.mode == 'multi-upload' }} + uses: actions/upload-artifact@v4 + with: + name: presigned_urls.json + path: ${{ github.workspace }}/presigned_urls.json + retention-days: 1 \ No newline at end of file diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml new file mode 100644 index 0000000000000..95c45bb32aa99 --- /dev/null +++ b/.github/actions/build/action.yml @@ -0,0 +1,37 @@ +name: Build workspace +description: Build workspace + +inputs: + docker_image: + description: Docker image + required: true + default: kmake-image:latest + +runs: + using: "composite" + steps: + - name: Download artifacts + shell: bash + run: | + mkdir -p ../artifacts && \ + wget -O ../artifacts/ramdisk.gz https://snapshots.linaro.org/member-builds/qcomlt/testimages/arm64/1379/initramfs-test-image-qemuarm64-20230321073831-1379.rootfs.cpio.gz && \ + wget -O ../artifacts/systemd-boot-efi.deb http://ports.ubuntu.com/pool/universe/s/systemd/systemd-boot-efi_255.4-1ubuntu8_arm64.deb && \ + dpkg-deb -xv ../artifacts/systemd-boot-efi.deb ../artifacts/systemd + + - name: Make + shell: bash + run: | + docker run -i --rm \ + --user $(id -u):$(id -g) \ + --workdir="$PWD" \ + -v "$(dirname $PWD)":"$(dirname $PWD)" \ + ${{ inputs.docker_image }} bash -c " + make O=../kobj defconfig + make O=../kobj -j$(nproc) + make O=../kobj -j$(nproc) dir-pkg INSTALL_MOD_STRIP=1 + " + + - name: Package DLKM into ramdisk + shell: bash + run: | + (cd ../kobj/tar-install ; find lib/modules | cpio -o -H newc -R +0:+0 | gzip -9 >> ../../artifacts/ramdisk.gz) \ No newline at end of file diff --git a/.github/actions/lava_job_render/action.yml b/.github/actions/lava_job_render/action.yml new file mode 100644 index 0000000000000..186a19275e388 --- /dev/null +++ b/.github/actions/lava_job_render/action.yml @@ -0,0 +1,154 @@ +name: Test Action +inputs: + docker_image: + description: Docker image + required: true + default: kmake-image:latest + +runs: + using: "composite" + steps: + - name: Process presigned_urls.json + id: process_urls + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const p = require('path'); + // Helper function to find URL by filename + function findUrlByFilename(filename) { + for (const [path, url] of Object.entries(data)) { + if (path.endsWith(filename)) { + return url; + } + } + return null; + } + const filePath = p.join(process.env.GITHUB_WORKSPACE, 'presigned_urls.json'); + if (fs.existsSync(filePath)) { + console.log("File exists"); + } else { + console.log("File does not exist"); + core.setFailed(`File not found: ${filePath}`); + } + // Read the JSON file + const data = JSON.parse(fs.readFileSync(filePath, 'utf-8')); + // Extract URLs into variables + const modulesTarUrl = findUrlByFilename('modules.tar.xz'); + const imageUrl = findUrlByFilename('Image'); + const vmlinuxUrl = findUrlByFilename('vmlinux'); + const dtbUrl = findUrlByFilename('qcs6490-rb3gen2.dtb'); + // Set outputs + core.setOutput('modules_url', modulesTarUrl); + core.setOutput('image_url', imageUrl); + core.setOutput('vmlinux_url', vmlinuxUrl); + core.setOutput('dtb_url', dtbUrl); + console.log(`Modules URL: ${modulesTarUrl}`); + console.log(`Image URL: ${imageUrl}`); + console.log(`Vmlinux URL: ${vmlinuxUrl}`); + console.log(`Dtb URL: ${dtbUrl}`); + + - name: Create metadata.json + id: create_metadata + shell: bash + run: | + echo "Creating job definition" + # Create the job definition using the processed URLs + cd ../job_render + docker 
run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + -e dtb_url="${{ steps.process_urls.outputs.dtb_url }}" \ + ${{ inputs.docker_image }} \ + jq '.artifacts["dtbs/qcom/qcs6490-rb3gen2.dtb"] = env.dtb_url' data/metadata.json > temp.json && mv temp.json data/metadata.json + + - name: Upload metadata.json + id: upload_metadata + uses: ./.github/actions/aws_s3_helper + with: + local_file: ../job_render/data/metadata.json + s3_bucket: qli-prd-kernel-gh-artifacts + mode: single-upload + + - name: Create template json + shell: bash + run: | + echo "Creating job definition" + metadata_url="${{ steps.upload_metadata.outputs.presigned_url }}" + vmlinux_url="${{ steps.process_urls.outputs.vmlinux_url }}" + image_url="${{ steps.process_urls.outputs.image_url }}" + modules_url="${{ steps.process_urls.outputs.modules_url }}" + # Create the job definition using the processed URLs + cd ../job_render + # using metadata_url + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + -e metadata_url="$metadata_url" \ + ${{ inputs.docker_image }} \ + jq '.artifacts.metadata = env.metadata_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json + # using image_url + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + -e image_url="$image_url" \ + ${{ inputs.docker_image }} \ + jq '.artifacts.kernel = env.image_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json + # using vmlinux_url + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + -e vmlinux_url="$vmlinux_url" \ + ${{ inputs.docker_image }} \ + jq '.artifacts.vmlinux = env.vmlinux_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json + # using modules_url + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + -e modules_url="$modules_url" \ + ${{ inputs.docker_image }} \ + jq '.artifacts.modules = env.modules_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json + + - name: Update firmware and ramdisk + shell: bash + run: | + cd ../job_render + ramdisk_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires 7600)" + firmware_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-firmware-rb3gen2-image-qcom-armv8a.cpio.gz --expires 7600)" + # using ramdisk_url + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + -e ramdisk_url="$ramdisk_url" \ + ${{ inputs.docker_image }} \ + jq '.artifacts.ramdisk = env.ramdisk_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json + + # using firmware_url + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + -e firmware_url="$firmware_url" \ + ${{ inputs.docker_image }} \ + jq '.artifacts.firmware = env.firmware_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json + + - name: Create lava_job_definition + shell: bash + run: | + cd ../job_render + mkdir renders + docker run -i --rm \ + --user "$(id -u):$(id -g)" \ + --workdir="$PWD" \ + -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ + ${{ inputs.docker_image }} \ + sh -c 'export BOOT_METHOD=fastboot && \ + 
export TARGET=qcs6490-rb3gen2 && \ + export TARGET_DTB=qcs6490-rb3gen2 && \ + python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json' \ No newline at end of file diff --git a/.github/actions/pull_docker_image/action.yml b/.github/actions/pull_docker_image/action.yml new file mode 100644 index 0000000000000..5f4277dc0765d --- /dev/null +++ b/.github/actions/pull_docker_image/action.yml @@ -0,0 +1,24 @@ +name: Pull docker image from ghcr +description: Pull docker image from ghcr + +inputs: + image: + description: The docker image to pull + required: true + default: kmake-image:latest + + github_token: + description: The GitHub token to use for authentication + required: true + +runs: + using: "composite" + steps: + - name: Clone kmake-image + run: | + git clone https://github.com/qualcomm-linux/kmake-image.git + + - name: Build docker image + run: | + cd kmake-image + docker build . -t kmake-image diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 0000000000000..0ff23fc40f4fc --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,75 @@ +name: _build +on: + workflow_call: + inputs: + docker_image: + description: Docker image + type: string + required: true + +jobs: + build: + runs-on: + group: GHA-Kernel-SelfHosted-RG + labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ] + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.ref }} + fetch-depth: 0 + + - name: Pull docker image + uses: ./.github/actions/pull_docker_image + with: + image: ${{ inputs.docker_image }} + github_token: ${{ secrets.GITHUB_TOKEN }} + + - name: Build workspace + id: build_workspace + uses: ./.github/actions/build + with: + docker_image: ${{ inputs.docker_image }} + + - name: Create file list for artifacts upload + run: | + touch ../artifacts/file_list.txt + tar -cJf modules.tar.xz ../kobj/tar-install/lib/modules/ + echo "modules.tar.xz" >> ../artifacts/file_list.txt + echo "../kobj/arch/arm64/boot/Image" >> ../artifacts/file_list.txt + echo "../kobj/vmlinux" >> ../artifacts/file_list.txt + echo "../kobj/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dtb" >> ../artifacts/file_list.txt + + - name: Upload artifacts + uses: ./.github/actions/aws_s3_helper + with: + s3_bucket: qli-prd-kernel-gh-artifacts + aws_access_key_id: ${{ secrets.AWSKEYID }} + aws_secret_access_key: ${{ secrets.AWSACCESSKEY }} + local_file: ../artifacts/file_list.txt + mode: multi-upload + + - name: Clean up + run: | + rm -rf ../artifacts + rm -rf ../kobj + rm -rf modules.tar.xz + + - name: Update summary + if: success() || failure() + shell: bash + run: | + if [ ${{ steps.build_workspace.outcome }} == 'success' ]; then + echo "Build was successful" + summary=":heavy_check_mark: Build Success" + else + echo "Build failed" + summary=":x: Build Failed" + fi + SUMMARY=' +
+          <details>
+          <summary>Build Summary</summary>
+
+          '${summary}'
+          </details>
+ ' + echo -e "$SUMMARY" >> $GITHUB_STEP_SUMMARY \ No newline at end of file diff --git a/.github/workflows/pre_merge.yml b/.github/workflows/pre_merge.yml new file mode 100644 index 0000000000000..906ebb42c80b1 --- /dev/null +++ b/.github/workflows/pre_merge.yml @@ -0,0 +1,22 @@ +name: pre_merge +on: + pull_request_target: + branches: + - qcom-next-staging + pull_request: + branches: + - qcom-next-staging + +jobs: + build: + uses: ./.github/workflows/build.yml + secrets: inherit + with: + docker_image: kmake-image:latest + + test: + needs: [build] + uses: ./.github/workflows/test.yml + secrets: inherit + with: + docker_image: kmake-image:latest \ No newline at end of file diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000000000..384f4bf7a7ed7 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,98 @@ +name: _test +description: Run tests on LAVA + +on: + workflow_call: + inputs: + docker_image: + description: Docker image + type: string + required: true + default: kmake-image:latest + +jobs: + test: + runs-on: + group: GHA-Kernel-SelfHosted-RG + labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ] + steps: + - name: Checkout code + uses: actions/checkout@v4 + with: + ref: ${{ github.ref }} + fetch-depth: 0 + + - name: Pull docker image + uses: ./.github/actions/pull_docker_image + with: + image: ${{ inputs.docker_image }} + github_token: ${{ secrets.GITHUB_TOKEN }} + + - name: Download URLs list + uses: actions/download-artifact@v4 + with: + name: presigned_urls.json + path: ${{ github.workspace }} + + - name: Clone lava job render scripts + run: cd .. && git clone https://github.com/qualcomm-linux/job_render + + - name: Create lava job definition + uses: ./.github/actions/lava_job_render + id: create_job_definition + with: + docker_image: ${{ inputs.docker_image }} + + - name: Submit lava job + id: submit_job + run: | + cd ../job_render + job_id=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs submit ./renders/lava_job_definition.yaml") + job_url="https://lava-oss.qualcomm.com/scheduler/job/$job_id" + echo "job_id=$job_id" >> $GITHUB_OUTPUT + echo "job_url=$job_url" >> $GITHUB_OUTPUT + echo "Lava Job: $job_url" + echo "JOB_ID=$job_id" >> $GITHUB_ENV + + - name: Check lava job results + id: check_job + run: | + STATE="" + while [ "$STATE" != "Finished" ]; do + state=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep state) + STATE=$(echo "$state" | cut -d':' -f2 | sed 's/^ *//;s/ *$//') + echo "Current status: $STATE" + sleep 30 + done + health=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep Health) + HEALTH=$(echo "$health" | cut -d':' -f2 | sed 's/^ *//;s/ *$//') + if [[ "$HEALTH" == "Complete" ]]; then + echo "Lava job passed." 
+ summary=":heavy_check_mark: Lava job passed." + echo "summary=$summary" >> $GITHUB_OUTPUT + exit 0 + else + echo "Lava job failed." + summary=":x: Lava job failed." + echo "summary=$summary" >> $GITHUB_OUTPUT + exit 1 + fi + + - name: Update summary + if: success() || failure() + shell: bash + run: | + if [ "${{ steps.create_job_definition.conclusion }}" == 'failure' ]; then + status=":x: Test job failed" + else + status="${{ steps.check_job.outputs.summary }}" + job_url="${{ steps.submit_job.outputs.job_url }}" + job_id="${{ steps.submit_job.outputs.job_id }}" + fi + SUMMARY=' +
'${status}' +
+ JOB ID: '${job_id}' +
+ ' + echo -e "$SUMMARY" >> $GITHUB_STEP_SUMMARY \ No newline at end of file From 8e3080009b5806e7e8db014587059af7bae46e16 Mon Sep 17 00:00:00 2001 From: VISHAL KUMAR Date: Fri, 23 May 2025 15:25:26 +0530 Subject: [PATCH 05/14] ci: Fix: add bash shell to actions & use pull_request_target only for premerge (#9) Signed-off-by: Vishal Kumar --- .github/actions/pull_docker_image/action.yml | 50 ++++++++++---------- .github/workflows/pre_merge.yml | 41 ++++++++-------- 2 files changed, 45 insertions(+), 46 deletions(-) diff --git a/.github/actions/pull_docker_image/action.yml b/.github/actions/pull_docker_image/action.yml index 5f4277dc0765d..9618615b02cf9 100644 --- a/.github/actions/pull_docker_image/action.yml +++ b/.github/actions/pull_docker_image/action.yml @@ -1,24 +1,26 @@ -name: Pull docker image from ghcr -description: Pull docker image from ghcr - -inputs: - image: - description: The docker image to pull - required: true - default: kmake-image:latest - - github_token: - description: The GitHub token to use for authentication - required: true - -runs: - using: "composite" - steps: - - name: Clone kmake-image - run: | - git clone https://github.com/qualcomm-linux/kmake-image.git - - - name: Build docker image - run: | - cd kmake-image - docker build . -t kmake-image +name: Pull docker image +description: Pull docker image + +inputs: + image: + description: The docker image to pull + required: true + default: kmake-image:latest + + github_token: + description: The GitHub token to use for authentication + required: true + +runs: + using: "composite" + steps: + - name: Clone kmake-image + shell: bash + run: | + git clone https://github.com/qualcomm-linux/kmake-image.git + + - name: Build docker image + shell: bash + run: | + cd kmake-image + docker build . -t kmake-image diff --git a/.github/workflows/pre_merge.yml b/.github/workflows/pre_merge.yml index 906ebb42c80b1..0bca8e714311f 100644 --- a/.github/workflows/pre_merge.yml +++ b/.github/workflows/pre_merge.yml @@ -1,22 +1,19 @@ -name: pre_merge -on: - pull_request_target: - branches: - - qcom-next-staging - pull_request: - branches: - - qcom-next-staging - -jobs: - build: - uses: ./.github/workflows/build.yml - secrets: inherit - with: - docker_image: kmake-image:latest - - test: - needs: [build] - uses: ./.github/workflows/test.yml - secrets: inherit - with: - docker_image: kmake-image:latest \ No newline at end of file +name: pre_merge +on: + pull_request_target: + branches: + - qcom-next-staging + +jobs: + build: + uses: ./.github/workflows/build.yml + secrets: inherit + with: + docker_image: kmake-image:latest + + test: + needs: [build] + uses: ./.github/workflows/test.yml + secrets: inherit + with: + docker_image: kmake-image:latest From f295cdc7c13d1e8ce130823ed4b29bd3f5371a7b Mon Sep 17 00:00:00 2001 From: Yuanjie Yang Date: Thu, 19 Dec 2024 15:59:45 +0800 Subject: [PATCH 06/14] FROMLIST: pinctrl: qcom: correct the ngpios entry for QCS615 Correct the ngpios entry to account for the UFS_RESET pin being exported as a GPIO in addition to the real GPIOs, allowing the UFS driver to toggle it. 
Fixes: b698f36a9d40 ("pinctrl: qcom: add the tlmm driver for QCS615 platform") Signed-off-by: Lijuan Gao Reviewed-by: Konrad Dybcio Link: https://lore.kernel.org/all/20241219-correct_gpio_ranges-v2-3-19af8588dbd0@quicinc.com/ Signed-off-by: Yuanjie Yang --- drivers/pinctrl/qcom/pinctrl-qcs615.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pinctrl/qcom/pinctrl-qcs615.c b/drivers/pinctrl/qcom/pinctrl-qcs615.c index 23015b055f6a9..17ca743c2210f 100644 --- a/drivers/pinctrl/qcom/pinctrl-qcs615.c +++ b/drivers/pinctrl/qcom/pinctrl-qcs615.c @@ -1062,7 +1062,7 @@ static const struct msm_pinctrl_soc_data qcs615_tlmm = { .nfunctions = ARRAY_SIZE(qcs615_functions), .groups = qcs615_groups, .ngroups = ARRAY_SIZE(qcs615_groups), - .ngpios = 123, + .ngpios = 124, .tiles = qcs615_tiles, .ntiles = ARRAY_SIZE(qcs615_tiles), .wakeirq_map = qcs615_pdc_map, From 011df29acf1a03f1ec5095903124233bdb042a3a Mon Sep 17 00:00:00 2001 From: Imran Shaik Date: Thu, 9 Jan 2025 14:27:47 +0530 Subject: [PATCH 07/14] FROMLIST: clk: qcom: Add support for Camera Clock Controller on QCS8300 The QCS8300 Camera clock controller is a derivative of SA8775P, but has few additional clocks and offset differences. Hence, add support for QCS8300 Camera clock controller by extending the SA8775P CamCC. Reviewed-by: Dmitry Baryshkov Signed-off-by: Imran Shaik Link: https://lore.kernel.org/all/20250109-qcs8300-mm-patches-new-v4-0-63e8ac268b02@quicinc.com/ Patch-mainline: linux-clk @ 01/09/25, 14:27 Signed-off-by: Shiraz Hashim --- drivers/clk/qcom/camcc-sa8775p.c | 103 +++++++++++++++++++++++++++++-- 1 file changed, 98 insertions(+), 5 deletions(-) diff --git a/drivers/clk/qcom/camcc-sa8775p.c b/drivers/clk/qcom/camcc-sa8775p.c index 11bd2e2348119..bd75f59d3ffeb 100644 --- a/drivers/clk/qcom/camcc-sa8775p.c +++ b/drivers/clk/qcom/camcc-sa8775p.c @@ -10,7 +10,7 @@ #include #include -#include +#include #include "clk-alpha-pll.h" #include "clk-branch.h" @@ -1681,6 +1681,24 @@ static struct clk_branch cam_cc_sm_obs_clk = { }, }; +static struct clk_branch cam_cc_titan_top_accu_shift_clk = { + .halt_reg = 0x131f0, + .halt_check = BRANCH_HALT_VOTED, + .clkr = { + .enable_reg = 0x131f0, + .enable_mask = BIT(0), + .hw.init = &(const struct clk_init_data) { + .name = "cam_cc_titan_top_accu_shift_clk", + .parent_hws = (const struct clk_hw*[]) { + &cam_cc_xo_clk_src.clkr.hw, + }, + .num_parents = 1, + .flags = CLK_SET_RATE_PARENT, + .ops = &clk_branch2_ops, + }, + }, +}; + static struct gdsc cam_cc_titan_top_gdsc = { .gdscr = 0x131bc, .en_rest_wait_val = 0x2, @@ -1775,6 +1793,7 @@ static struct clk_regmap *cam_cc_sa8775p_clocks[] = { [CAM_CC_SLEEP_CLK_SRC] = &cam_cc_sleep_clk_src.clkr, [CAM_CC_SLOW_AHB_CLK_SRC] = &cam_cc_slow_ahb_clk_src.clkr, [CAM_CC_SM_OBS_CLK] = &cam_cc_sm_obs_clk.clkr, + [CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] = NULL, [CAM_CC_XO_CLK_SRC] = &cam_cc_xo_clk_src.clkr, [CAM_CC_QDSS_DEBUG_XO_CLK] = &cam_cc_qdss_debug_xo_clk.clkr, }; @@ -1811,6 +1830,7 @@ static const struct qcom_cc_desc cam_cc_sa8775p_desc = { }; static const struct of_device_id cam_cc_sa8775p_match_table[] = { + { .compatible = "qcom,qcs8300-camcc" }, { .compatible = "qcom,sa8775p-camcc" }, { } }; @@ -1841,10 +1861,83 @@ static int cam_cc_sa8775p_probe(struct platform_device *pdev) clk_lucid_evo_pll_configure(&cam_cc_pll4, regmap, &cam_cc_pll4_config); clk_lucid_evo_pll_configure(&cam_cc_pll5, regmap, &cam_cc_pll5_config); - /* Keep some clocks always enabled */ - qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */ - 
qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */ - qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */ + if (of_device_is_compatible(pdev->dev.of_node, "qcom,qcs8300-camcc")) { + cam_cc_camnoc_axi_clk_src.cmd_rcgr = 0x13154; + cam_cc_camnoc_axi_clk.halt_reg = 0x1316c; + cam_cc_camnoc_axi_clk.clkr.enable_reg = 0x1316c; + cam_cc_camnoc_dcd_xo_clk.halt_reg = 0x13174; + cam_cc_camnoc_dcd_xo_clk.clkr.enable_reg = 0x13174; + + cam_cc_csi0phytimer_clk_src.cmd_rcgr = 0x15054; + cam_cc_csi1phytimer_clk_src.cmd_rcgr = 0x15078; + cam_cc_csi2phytimer_clk_src.cmd_rcgr = 0x15098; + cam_cc_csid_clk_src.cmd_rcgr = 0x13134; + + cam_cc_mclk0_clk_src.cmd_rcgr = 0x15000; + cam_cc_mclk1_clk_src.cmd_rcgr = 0x1501c; + cam_cc_mclk2_clk_src.cmd_rcgr = 0x15038; + + cam_cc_fast_ahb_clk_src.cmd_rcgr = 0x13104; + cam_cc_slow_ahb_clk_src.cmd_rcgr = 0x1311c; + cam_cc_xo_clk_src.cmd_rcgr = 0x131b8; + cam_cc_sleep_clk_src.cmd_rcgr = 0x131d4; + + cam_cc_core_ahb_clk.halt_reg = 0x131b4; + cam_cc_core_ahb_clk.clkr.enable_reg = 0x131b4; + + cam_cc_cpas_ahb_clk.halt_reg = 0x130f4; + cam_cc_cpas_ahb_clk.clkr.enable_reg = 0x130f4; + cam_cc_cpas_fast_ahb_clk.halt_reg = 0x130fc; + cam_cc_cpas_fast_ahb_clk.clkr.enable_reg = 0x130fc; + + cam_cc_csi0phytimer_clk.halt_reg = 0x1506c; + cam_cc_csi0phytimer_clk.clkr.enable_reg = 0x1506c; + cam_cc_csi1phytimer_clk.halt_reg = 0x15090; + cam_cc_csi1phytimer_clk.clkr.enable_reg = 0x15090; + cam_cc_csi2phytimer_clk.halt_reg = 0x150b0; + cam_cc_csi2phytimer_clk.clkr.enable_reg = 0x150b0; + cam_cc_csid_clk.halt_reg = 0x1314c; + cam_cc_csid_clk.clkr.enable_reg = 0x1314c; + cam_cc_csid_csiphy_rx_clk.halt_reg = 0x15074; + cam_cc_csid_csiphy_rx_clk.clkr.enable_reg = 0x15074; + cam_cc_csiphy0_clk.halt_reg = 0x15070; + cam_cc_csiphy0_clk.clkr.enable_reg = 0x15070; + cam_cc_csiphy1_clk.halt_reg = 0x15094; + cam_cc_csiphy1_clk.clkr.enable_reg = 0x15094; + cam_cc_csiphy2_clk.halt_reg = 0x150b4; + cam_cc_csiphy2_clk.clkr.enable_reg = 0x150b4; + + cam_cc_mclk0_clk.halt_reg = 0x15018; + cam_cc_mclk0_clk.clkr.enable_reg = 0x15018; + cam_cc_mclk1_clk.halt_reg = 0x15034; + cam_cc_mclk1_clk.clkr.enable_reg = 0x15034; + cam_cc_mclk2_clk.halt_reg = 0x15050; + cam_cc_mclk2_clk.clkr.enable_reg = 0x15050; + cam_cc_qdss_debug_xo_clk.halt_reg = 0x1319c; + cam_cc_qdss_debug_xo_clk.clkr.enable_reg = 0x1319c; + + cam_cc_titan_top_gdsc.gdscr = 0x131a0; + + cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK] = NULL; + cam_cc_sa8775p_clocks[CAM_CC_CCI_3_CLK_SRC] = NULL; + cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK] = NULL; + cam_cc_sa8775p_clocks[CAM_CC_CSI3PHYTIMER_CLK_SRC] = NULL; + cam_cc_sa8775p_clocks[CAM_CC_CSIPHY3_CLK] = NULL; + cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK] = NULL; + cam_cc_sa8775p_clocks[CAM_CC_MCLK3_CLK_SRC] = NULL; + cam_cc_sa8775p_clocks[CAM_CC_TITAN_TOP_ACCU_SHIFT_CLK] = + &cam_cc_titan_top_accu_shift_clk.clkr; + + /* Keep some clocks always enabled */ + qcom_branch_set_clk_en(regmap, 0x13178); /* CAM_CC_CAMNOC_XO_CLK */ + qcom_branch_set_clk_en(regmap, 0x131d0); /* CAM_CC_GDSC_CLK */ + qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_SLEEP_CLK */ + } else { + /* Keep some clocks always enabled */ + qcom_branch_set_clk_en(regmap, 0x13194); /* CAM_CC_CAMNOC_XO_CLK */ + qcom_branch_set_clk_en(regmap, 0x131ec); /* CAM_CC_GDSC_CLK */ + qcom_branch_set_clk_en(regmap, 0x13208); /* CAM_CC_SLEEP_CLK */ + } ret = qcom_cc_really_probe(&pdev->dev, &cam_cc_sa8775p_desc, regmap); From 8310781737079f87c05e06bade01b6414aa1bbf3 Mon Sep 17 00:00:00 2001 From: Taniya Das Date: Tue, 22 Oct 2024 
17:22:53 +0530 Subject: [PATCH 08/14] FROMGIT: clk: qcom: gcc: Add support for QCS615 GCC clocks Add the global clock controller support for QCS615 SoC. Signed-off-by: Taniya Das Reviewed-by: Dmitry Baryshkov Reviewed-by: Imran Shaik --- drivers/clk/qcom/Kconfig | 8 ++++++++ drivers/clk/qcom/Makefile | 1 + 2 files changed, 9 insertions(+) diff --git a/drivers/clk/qcom/Kconfig b/drivers/clk/qcom/Kconfig index 7d5dac26b244b..3d96baa705a46 100644 --- a/drivers/clk/qcom/Kconfig +++ b/drivers/clk/qcom/Kconfig @@ -519,6 +519,14 @@ config QCS_GCC_8300 QCS8300 devices. Say Y if you want to use peripheral devices such as UART, SPI, I2C, USB, SD/UFS, PCIe etc. +config QCS_GCC_615 + tristate "QCS615 Global Clock Controller" + depends on ARM64 || COMPILE_TEST + select QCOM_GDSC + help + Support for the global clock controller on QCS615 devices. + Say Y if you want to use multimedia devices or peripheral + devices such as UART, SPI, I2C, USB, SD/eMMC, PCIe etc. config QCS_GCC_615 tristate "QCS615 Global Clock Controller" diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile index 96862e99e5d43..61a783c01c5dc 100644 --- a/drivers/clk/qcom/Makefile +++ b/drivers/clk/qcom/Makefile @@ -76,6 +76,7 @@ obj-$(CONFIG_QCM_DISPCC_2290) += dispcc-qcm2290.o obj-$(CONFIG_QCS_GCC_404) += gcc-qcs404.o obj-$(CONFIG_QCS_GCC_615) += gcc-qcs615.o obj-$(CONFIG_QCS_GCC_8300) += gcc-qcs8300.o +obj-$(CONFIG_QCS_GCC_615) += gcc-qcs615.o obj-$(CONFIG_QCS_Q6SSTOP_404) += q6sstop-qcs404.o obj-$(CONFIG_QCS_TURING_404) += turingcc-qcs404.o obj-$(CONFIG_QDU_ECPRICC_1000) += ecpricc-qdu1000.o From c2c9f6ade3811c76a7e96159e8ce5d06a168e6d4 Mon Sep 17 00:00:00 2001 From: VISHAL KUMAR Date: Tue, 27 May 2025 12:16:54 +0530 Subject: [PATCH 09/14] ci: Fix: remove dos lines (#14) Signed-off-by: Vishal Kumar --- .github/actions/aws_s3_helper/action.yml | 178 ++++++------ .github/actions/build/action.yml | 72 ++--- .github/actions/lava_job_render/action.yml | 308 ++++++++++----------- .github/workflows/build.yml | 150 +++++----- .github/workflows/test.yml | 194 ++++++------- 5 files changed, 451 insertions(+), 451 deletions(-) diff --git a/.github/actions/aws_s3_helper/action.yml b/.github/actions/aws_s3_helper/action.yml index 6ae81d7c5da1d..5c94170ea1138 100644 --- a/.github/actions/aws_s3_helper/action.yml +++ b/.github/actions/aws_s3_helper/action.yml @@ -1,90 +1,90 @@ -name: AWS S3 Helper -description: Upload and download files from AWS S3 - -inputs: - s3_bucket: - description: S3 Bucket Name - required: true - local_file: - description: Local file paths - required: false - default: ../artifacts/file_list.txt - download_file: - description: Download file paths - required: false - default: '' - mode: - description: Mode of operation (upload/download) - required: true - default: single-upload - -outputs: - presigned_url: - description: Pre-signed URL for the uploaded file - value: ${{ steps.sync-data.outputs.presigned_url }} - -runs: - using: "composite" - steps: - - name: Sync Data - id: sync-data - shell: bash - env: - UPLOAD_LOCATION: ${{ github.repository_owner }}/${{ github.event.repository.name }}/${{ github.workflow }}/${{ github.head_ref != '' && github.head_ref || github.run_id }}/ - run: | - echo "::group::$(printf '__________ %-100s' 'Process' | tr ' ' _)" - case "${{ inputs.mode }}" in - multi-upload) - echo "Uploading files to S3 bucket..." 
- first_line=true - # Start the JSON object - echo "{" > ${{ github.workspace }}/presigned_urls.json - while IFS= read -r file; do - if [ -f "$file" ]; then - echo "Uploading $file..." - aws s3 cp "$file" s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }} - echo "Uploaded $file to s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}" - echo "Creating Pre-signed URL for $file..." - filename=$(basename "$file") - presigned_url=$(aws s3 presign s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}$filename --expires-in 3600) - if [ "$first_line" = true ]; then - first_line=false - else - echo "," >> ${{ github.workspace }}/presigned_urls.json - fi - # Append the pre-signed URL to the file - echo " \"${file}\": \"${presigned_url}\"" >> ${{ github.workspace }}/presigned_urls.json - echo "Pre-signed URL for $file: $presigned_url" - else - echo "Warning: $file does not exist or is not a regular file." - fi - done < "${{ inputs.local_file }}" - # Close the JSON object - echo "}" >> ${{ github.workspace }}/presigned_urls.json - ;; - single-upload) - echo "Uploading single file to S3 bucket..." - aws s3 cp "${{ inputs.local_file }}" s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }} - echo "Uploaded ${{ inputs.local_file }} to s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}" - echo "Creating Pre-signed URL for ${{ inputs.local_file }}..." - presigned_url=$(aws s3 presign s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}${{ inputs.local_file }} --expires-in 3600) - echo "presigned_url=${presigned_url}" >> "$GITHUB_OUTPUT" - ;; - download) - #Download The required file from s3 - echo "Downloading files from S3 bucket..." - aws s3 sync s3://${{ inputs.s3_bucket }}/${{ inputs.download_file }} . - ;; - *) - echo "Invalid mode. Use 'upload' or 'download'." - exit 1 - ;; - esac - - - name: Upload artifacts - if: ${{ inputs.mode == 'multi-upload' }} - uses: actions/upload-artifact@v4 - with: - name: presigned_urls.json - path: ${{ github.workspace }}/presigned_urls.json +name: AWS S3 Helper +description: Upload and download files from AWS S3 + +inputs: + s3_bucket: + description: S3 Bucket Name + required: true + local_file: + description: Local file paths + required: false + default: ../artifacts/file_list.txt + download_file: + description: Download file paths + required: false + default: '' + mode: + description: Mode of operation (upload/download) + required: true + default: single-upload + +outputs: + presigned_url: + description: Pre-signed URL for the uploaded file + value: ${{ steps.sync-data.outputs.presigned_url }} + +runs: + using: "composite" + steps: + - name: Sync Data + id: sync-data + shell: bash + env: + UPLOAD_LOCATION: ${{ github.repository_owner }}/${{ github.event.repository.name }}/${{ github.workflow }}/${{ github.head_ref != '' && github.head_ref || github.run_id }}/ + run: | + echo "::group::$(printf '__________ %-100s' 'Process' | tr ' ' _)" + case "${{ inputs.mode }}" in + multi-upload) + echo "Uploading files to S3 bucket..." + first_line=true + # Start the JSON object + echo "{" > ${{ github.workspace }}/presigned_urls.json + while IFS= read -r file; do + if [ -f "$file" ]; then + echo "Uploading $file..." + aws s3 cp "$file" s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }} + echo "Uploaded $file to s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}" + echo "Creating Pre-signed URL for $file..." 
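+            # aws s3 presign returns a time-limited URL (3600 s here) that lets
+            # the artifact be fetched without AWS credentials until it expires.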
+ filename=$(basename "$file") + presigned_url=$(aws s3 presign s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}$filename --expires-in 3600) + if [ "$first_line" = true ]; then + first_line=false + else + echo "," >> ${{ github.workspace }}/presigned_urls.json + fi + # Append the pre-signed URL to the file + echo " \"${file}\": \"${presigned_url}\"" >> ${{ github.workspace }}/presigned_urls.json + echo "Pre-signed URL for $file: $presigned_url" + else + echo "Warning: $file does not exist or is not a regular file." + fi + done < "${{ inputs.local_file }}" + # Close the JSON object + echo "}" >> ${{ github.workspace }}/presigned_urls.json + ;; + single-upload) + echo "Uploading single file to S3 bucket..." + aws s3 cp "${{ inputs.local_file }}" s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }} + echo "Uploaded ${{ inputs.local_file }} to s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}" + echo "Creating Pre-signed URL for ${{ inputs.local_file }}..." + presigned_url=$(aws s3 presign s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}${{ inputs.local_file }} --expires-in 3600) + echo "presigned_url=${presigned_url}" >> "$GITHUB_OUTPUT" + ;; + download) + #Download The required file from s3 + echo "Downloading files from S3 bucket..." + aws s3 sync s3://${{ inputs.s3_bucket }}/${{ inputs.download_file }} . + ;; + *) + echo "Invalid mode. Use 'upload' or 'download'." + exit 1 + ;; + esac + + - name: Upload artifacts + if: ${{ inputs.mode == 'multi-upload' }} + uses: actions/upload-artifact@v4 + with: + name: presigned_urls.json + path: ${{ github.workspace }}/presigned_urls.json retention-days: 1 \ No newline at end of file diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml index 95c45bb32aa99..c998e4d418cf9 100644 --- a/.github/actions/build/action.yml +++ b/.github/actions/build/action.yml @@ -1,37 +1,37 @@ -name: Build workspace -description: Build workspace - -inputs: - docker_image: - description: Docker image - required: true - default: kmake-image:latest - -runs: - using: "composite" - steps: - - name: Download artifacts - shell: bash - run: | - mkdir -p ../artifacts && \ - wget -O ../artifacts/ramdisk.gz https://snapshots.linaro.org/member-builds/qcomlt/testimages/arm64/1379/initramfs-test-image-qemuarm64-20230321073831-1379.rootfs.cpio.gz && \ - wget -O ../artifacts/systemd-boot-efi.deb http://ports.ubuntu.com/pool/universe/s/systemd/systemd-boot-efi_255.4-1ubuntu8_arm64.deb && \ - dpkg-deb -xv ../artifacts/systemd-boot-efi.deb ../artifacts/systemd - - - name: Make - shell: bash - run: | - docker run -i --rm \ - --user $(id -u):$(id -g) \ - --workdir="$PWD" \ - -v "$(dirname $PWD)":"$(dirname $PWD)" \ - ${{ inputs.docker_image }} bash -c " - make O=../kobj defconfig - make O=../kobj -j$(nproc) - make O=../kobj -j$(nproc) dir-pkg INSTALL_MOD_STRIP=1 - " - - - name: Package DLKM into ramdisk - shell: bash - run: | +name: Build workspace +description: Build workspace + +inputs: + docker_image: + description: Docker image + required: true + default: kmake-image:latest + +runs: + using: "composite" + steps: + - name: Download artifacts + shell: bash + run: | + mkdir -p ../artifacts && \ + wget -O ../artifacts/ramdisk.gz https://snapshots.linaro.org/member-builds/qcomlt/testimages/arm64/1379/initramfs-test-image-qemuarm64-20230321073831-1379.rootfs.cpio.gz && \ + wget -O ../artifacts/systemd-boot-efi.deb http://ports.ubuntu.com/pool/universe/s/systemd/systemd-boot-efi_255.4-1ubuntu8_arm64.deb && \ + dpkg-deb -xv 
../artifacts/systemd-boot-efi.deb ../artifacts/systemd + + - name: Make + shell: bash + run: | + docker run -i --rm \ + --user $(id -u):$(id -g) \ + --workdir="$PWD" \ + -v "$(dirname $PWD)":"$(dirname $PWD)" \ + ${{ inputs.docker_image }} bash -c " + make O=../kobj defconfig + make O=../kobj -j$(nproc) + make O=../kobj -j$(nproc) dir-pkg INSTALL_MOD_STRIP=1 + " + + - name: Package DLKM into ramdisk + shell: bash + run: | (cd ../kobj/tar-install ; find lib/modules | cpio -o -H newc -R +0:+0 | gzip -9 >> ../../artifacts/ramdisk.gz) \ No newline at end of file diff --git a/.github/actions/lava_job_render/action.yml b/.github/actions/lava_job_render/action.yml index 186a19275e388..be9b2587685e4 100644 --- a/.github/actions/lava_job_render/action.yml +++ b/.github/actions/lava_job_render/action.yml @@ -1,154 +1,154 @@ -name: Test Action -inputs: - docker_image: - description: Docker image - required: true - default: kmake-image:latest - -runs: - using: "composite" - steps: - - name: Process presigned_urls.json - id: process_urls - uses: actions/github-script@v7 - with: - script: | - const fs = require('fs'); - const p = require('path'); - // Helper function to find URL by filename - function findUrlByFilename(filename) { - for (const [path, url] of Object.entries(data)) { - if (path.endsWith(filename)) { - return url; - } - } - return null; - } - const filePath = p.join(process.env.GITHUB_WORKSPACE, 'presigned_urls.json'); - if (fs.existsSync(filePath)) { - console.log("File exists"); - } else { - console.log("File does not exist"); - core.setFailed(`File not found: ${filePath}`); - } - // Read the JSON file - const data = JSON.parse(fs.readFileSync(filePath, 'utf-8')); - // Extract URLs into variables - const modulesTarUrl = findUrlByFilename('modules.tar.xz'); - const imageUrl = findUrlByFilename('Image'); - const vmlinuxUrl = findUrlByFilename('vmlinux'); - const dtbUrl = findUrlByFilename('qcs6490-rb3gen2.dtb'); - // Set outputs - core.setOutput('modules_url', modulesTarUrl); - core.setOutput('image_url', imageUrl); - core.setOutput('vmlinux_url', vmlinuxUrl); - core.setOutput('dtb_url', dtbUrl); - console.log(`Modules URL: ${modulesTarUrl}`); - console.log(`Image URL: ${imageUrl}`); - console.log(`Vmlinux URL: ${vmlinuxUrl}`); - console.log(`Dtb URL: ${dtbUrl}`); - - - name: Create metadata.json - id: create_metadata - shell: bash - run: | - echo "Creating job definition" - # Create the job definition using the processed URLs - cd ../job_render - docker run -i --rm \ - --user "$(id -u):$(id -g)" \ - --workdir="$PWD" \ - -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ - -e dtb_url="${{ steps.process_urls.outputs.dtb_url }}" \ - ${{ inputs.docker_image }} \ - jq '.artifacts["dtbs/qcom/qcs6490-rb3gen2.dtb"] = env.dtb_url' data/metadata.json > temp.json && mv temp.json data/metadata.json - - - name: Upload metadata.json - id: upload_metadata - uses: ./.github/actions/aws_s3_helper - with: - local_file: ../job_render/data/metadata.json - s3_bucket: qli-prd-kernel-gh-artifacts - mode: single-upload - - - name: Create template json - shell: bash - run: | - echo "Creating job definition" - metadata_url="${{ steps.upload_metadata.outputs.presigned_url }}" - vmlinux_url="${{ steps.process_urls.outputs.vmlinux_url }}" - image_url="${{ steps.process_urls.outputs.image_url }}" - modules_url="${{ steps.process_urls.outputs.modules_url }}" - # Create the job definition using the processed URLs - cd ../job_render - # using metadata_url - docker run -i --rm \ - --user "$(id -u):$(id -g)" \ - --workdir="$PWD" 
\ - -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ - -e metadata_url="$metadata_url" \ - ${{ inputs.docker_image }} \ - jq '.artifacts.metadata = env.metadata_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json - # using image_url - docker run -i --rm \ - --user "$(id -u):$(id -g)" \ - --workdir="$PWD" \ - -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ - -e image_url="$image_url" \ - ${{ inputs.docker_image }} \ - jq '.artifacts.kernel = env.image_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json - # using vmlinux_url - docker run -i --rm \ - --user "$(id -u):$(id -g)" \ - --workdir="$PWD" \ - -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ - -e vmlinux_url="$vmlinux_url" \ - ${{ inputs.docker_image }} \ - jq '.artifacts.vmlinux = env.vmlinux_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json - # using modules_url - docker run -i --rm \ - --user "$(id -u):$(id -g)" \ - --workdir="$PWD" \ - -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ - -e modules_url="$modules_url" \ - ${{ inputs.docker_image }} \ - jq '.artifacts.modules = env.modules_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json - - - name: Update firmware and ramdisk - shell: bash - run: | - cd ../job_render - ramdisk_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires 7600)" - firmware_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-firmware-rb3gen2-image-qcom-armv8a.cpio.gz --expires 7600)" - # using ramdisk_url - docker run -i --rm \ - --user "$(id -u):$(id -g)" \ - --workdir="$PWD" \ - -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ - -e ramdisk_url="$ramdisk_url" \ - ${{ inputs.docker_image }} \ - jq '.artifacts.ramdisk = env.ramdisk_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json - - # using firmware_url - docker run -i --rm \ - --user "$(id -u):$(id -g)" \ - --workdir="$PWD" \ - -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ - -e firmware_url="$firmware_url" \ - ${{ inputs.docker_image }} \ - jq '.artifacts.firmware = env.firmware_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json - - - name: Create lava_job_definition - shell: bash - run: | - cd ../job_render - mkdir renders - docker run -i --rm \ - --user "$(id -u):$(id -g)" \ - --workdir="$PWD" \ - -v "$(dirname "$PWD")":"$(dirname "$PWD")" \ - ${{ inputs.docker_image }} \ - sh -c 'export BOOT_METHOD=fastboot && \ - export TARGET=qcs6490-rb3gen2 && \ - export TARGET_DTB=qcs6490-rb3gen2 && \ - python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json' \ No newline at end of file +name: Test Action +inputs: + docker_image: + description: Docker image + required: true + default: kmake-image:latest + +runs: + using: "composite" + steps: + - name: Process presigned_urls.json + id: process_urls + uses: actions/github-script@v7 + with: + script: | + const fs = require('fs'); + const p = require('path'); + // Helper function to find URL by filename + function findUrlByFilename(filename) { + for (const [path, url] of Object.entries(data)) { + if (path.endsWith(filename)) { + return url; + } + } + return null; + } + const filePath = p.join(process.env.GITHUB_WORKSPACE, 'presigned_urls.json'); + if (fs.existsSync(filePath)) { + console.log("File exists"); + } else { + console.log("File does not exist"); + core.setFailed(`File not found: ${filePath}`); + } + // Read the JSON file + const data = JSON.parse(fs.readFileSync(filePath, 'utf-8')); + // 
+          // Extract URLs into variables
+          const modulesTarUrl = findUrlByFilename('modules.tar.xz');
+          const imageUrl = findUrlByFilename('Image');
+          const vmlinuxUrl = findUrlByFilename('vmlinux');
+          const dtbUrl = findUrlByFilename('qcs6490-rb3gen2.dtb');
+          // Set outputs
+          core.setOutput('modules_url', modulesTarUrl);
+          core.setOutput('image_url', imageUrl);
+          core.setOutput('vmlinux_url', vmlinuxUrl);
+          core.setOutput('dtb_url', dtbUrl);
+          console.log(`Modules URL: ${modulesTarUrl}`);
+          console.log(`Image URL: ${imageUrl}`);
+          console.log(`Vmlinux URL: ${vmlinuxUrl}`);
+          console.log(`Dtb URL: ${dtbUrl}`);
+
+    - name: Create metadata.json
+      id: create_metadata
+      shell: bash
+      run: |
+        echo "Creating job definition"
+        # Create the job definition using the processed URLs
+        cd ../job_render
+        docker run -i --rm \
+          --user "$(id -u):$(id -g)" \
+          --workdir="$PWD" \
+          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+          -e dtb_url="${{ steps.process_urls.outputs.dtb_url }}" \
+          ${{ inputs.docker_image }} \
+          jq '.artifacts["dtbs/qcom/qcs6490-rb3gen2.dtb"] = env.dtb_url' data/metadata.json > temp.json && mv temp.json data/metadata.json
+
+    - name: Upload metadata.json
+      id: upload_metadata
+      uses: ./.github/actions/aws_s3_helper
+      with:
+        local_file: ../job_render/data/metadata.json
+        s3_bucket: qli-prd-kernel-gh-artifacts
+        mode: single-upload
+
+    - name: Create template json
+      shell: bash
+      run: |
+        echo "Creating job definition"
+        metadata_url="${{ steps.upload_metadata.outputs.presigned_url }}"
+        vmlinux_url="${{ steps.process_urls.outputs.vmlinux_url }}"
+        image_url="${{ steps.process_urls.outputs.image_url }}"
+        modules_url="${{ steps.process_urls.outputs.modules_url }}"
+        # Create the job definition using the processed URLs
+        cd ../job_render
+        # using metadata_url
+        docker run -i --rm \
+          --user "$(id -u):$(id -g)" \
+          --workdir="$PWD" \
+          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+          -e metadata_url="$metadata_url" \
+          ${{ inputs.docker_image }} \
+          jq '.artifacts.metadata = env.metadata_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+        # using image_url
+        docker run -i --rm \
+          --user "$(id -u):$(id -g)" \
+          --workdir="$PWD" \
+          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+          -e image_url="$image_url" \
+          ${{ inputs.docker_image }} \
+          jq '.artifacts.kernel = env.image_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+        # using vmlinux_url
+        docker run -i --rm \
+          --user "$(id -u):$(id -g)" \
+          --workdir="$PWD" \
+          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+          -e vmlinux_url="$vmlinux_url" \
+          ${{ inputs.docker_image }} \
+          jq '.artifacts.vmlinux = env.vmlinux_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+        # using modules_url
+        docker run -i --rm \
+          --user "$(id -u):$(id -g)" \
+          --workdir="$PWD" \
+          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+          -e modules_url="$modules_url" \
+          ${{ inputs.docker_image }} \
+          jq '.artifacts.modules = env.modules_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+
+    - name: Update firmware and ramdisk
+      shell: bash
+      run: |
+        cd ../job_render
+        ramdisk_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires-in 7600)"
+        firmware_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-firmware-rb3gen2-image-qcom-armv8a.cpio.gz --expires-in 7600)"
+        # using ramdisk_url
+        docker run -i --rm \
+          --user "$(id -u):$(id -g)" \
+          --workdir="$PWD" \
+          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+          -e ramdisk_url="$ramdisk_url" \
+          ${{ inputs.docker_image }} \
+          jq '.artifacts.ramdisk = env.ramdisk_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+
+        # using firmware_url
+        docker run -i --rm \
+          --user "$(id -u):$(id -g)" \
+          --workdir="$PWD" \
+          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+          -e firmware_url="$firmware_url" \
+          ${{ inputs.docker_image }} \
+          jq '.artifacts.firmware = env.firmware_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
+
+    - name: Create lava_job_definition
+      shell: bash
+      run: |
+        cd ../job_render
+        mkdir renders
+        docker run -i --rm \
+          --user "$(id -u):$(id -g)" \
+          --workdir="$PWD" \
+          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
+          ${{ inputs.docker_image }} \
+          sh -c 'export BOOT_METHOD=fastboot && \
+          export TARGET=qcs6490-rb3gen2 && \
+          export TARGET_DTB=qcs6490-rb3gen2 && \
+          python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json'
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 0ff23fc40f4fc..ad565b476c3a1 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1,75 +1,75 @@
-name: _build
-on:
-  workflow_call:
-    inputs:
-      docker_image:
-        description: Docker image
-        type: string
-        required: true
-
-jobs:
-  build:
-    runs-on:
-      group: GHA-Kernel-SelfHosted-RG
-      labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ]
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ github.ref }}
-          fetch-depth: 0
-
-      - name: Pull docker image
-        uses: ./.github/actions/pull_docker_image
-        with:
-          image: ${{ inputs.docker_image }}
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Build workspace
-        id: build_workspace
-        uses: ./.github/actions/build
-        with:
-          docker_image: ${{ inputs.docker_image }}
-
-      - name: Create file list for artifacts upload
-        run: |
-          touch ../artifacts/file_list.txt
-          tar -cJf modules.tar.xz ../kobj/tar-install/lib/modules/
-          echo "modules.tar.xz" >> ../artifacts/file_list.txt
-          echo "../kobj/arch/arm64/boot/Image" >> ../artifacts/file_list.txt
-          echo "../kobj/vmlinux" >> ../artifacts/file_list.txt
-          echo "../kobj/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dtb" >> ../artifacts/file_list.txt
-
-      - name: Upload artifacts
-        uses: ./.github/actions/aws_s3_helper
-        with:
-          s3_bucket: qli-prd-kernel-gh-artifacts
-          aws_access_key_id: ${{ secrets.AWSKEYID }}
-          aws_secret_access_key: ${{ secrets.AWSACCESSKEY }}
-          local_file: ../artifacts/file_list.txt
-          mode: multi-upload
-
-      - name: Clean up
-        run: |
-          rm -rf ../artifacts
-          rm -rf ../kobj
-          rm -rf modules.tar.xz
-
-      - name: Update summary
-        if: success() || failure()
-        shell: bash
-        run: |
-          if [ ${{ steps.build_workspace.outcome }} == 'success' ]; then
-            echo "Build was successful"
-            summary=":heavy_check_mark: Build Success"
-          else
-            echo "Build failed"
-            summary=":x: Build Failed"
-          fi
-          SUMMARY='
-          <br>Build Summary
-
-          '${summary}'
-          <br>
-          '
-          echo -e "$SUMMARY" >> $GITHUB_STEP_SUMMARY
\ No newline at end of file
+name: _build
+on:
+  workflow_call:
+    inputs:
+      docker_image:
+        description: Docker image
+        type: string
+        required: true
+
+jobs:
+  build:
+    runs-on:
+      group: GHA-Kernel-SelfHosted-RG
+      labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ]
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ github.ref }}
+          fetch-depth: 0
+
+      - name: Pull docker image
+        uses: ./.github/actions/pull_docker_image
+        with:
+          image: ${{ inputs.docker_image }}
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Build workspace
+        id: build_workspace
+        uses: ./.github/actions/build
+        with:
+          docker_image: ${{ inputs.docker_image }}
+
+      - name: Create file list for artifacts upload
+        run: |
+          touch ../artifacts/file_list.txt
+          tar -cJf modules.tar.xz ../kobj/tar-install/lib/modules/
+          echo "modules.tar.xz" >> ../artifacts/file_list.txt
+          echo "../kobj/arch/arm64/boot/Image" >> ../artifacts/file_list.txt
+          echo "../kobj/vmlinux" >> ../artifacts/file_list.txt
+          echo "../kobj/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dtb" >> ../artifacts/file_list.txt
+
+      - name: Upload artifacts
+        uses: ./.github/actions/aws_s3_helper
+        with:
+          s3_bucket: qli-prd-kernel-gh-artifacts
+          aws_access_key_id: ${{ secrets.AWSKEYID }}
+          aws_secret_access_key: ${{ secrets.AWSACCESSKEY }}
+          local_file: ../artifacts/file_list.txt
+          mode: multi-upload
+
+      - name: Clean up
+        run: |
+          rm -rf ../artifacts
+          rm -rf ../kobj
+          rm -rf modules.tar.xz
+
+      - name: Update summary
+        if: success() || failure()
+        shell: bash
+        run: |
+          if [ ${{ steps.build_workspace.outcome }} == 'success' ]; then
+            echo "Build was successful"
+            summary=":heavy_check_mark: Build Success"
+          else
+            echo "Build failed"
+            summary=":x: Build Failed"
+          fi
+          SUMMARY='
+          <br>Build Summary
+
+          '${summary}'
+          <br>
+          '
+          echo -e "$SUMMARY" >> $GITHUB_STEP_SUMMARY
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 384f4bf7a7ed7..669e35beb347f 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -1,98 +1,98 @@
-name: _test
-description: Run tests on LAVA
-
-on:
-  workflow_call:
-    inputs:
-      docker_image:
-        description: Docker image
-        type: string
-        required: true
-        default: kmake-image:latest
-
-jobs:
-  test:
-    runs-on:
-      group: GHA-Kernel-SelfHosted-RG
-      labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ]
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ github.ref }}
-          fetch-depth: 0
-
-      - name: Pull docker image
-        uses: ./.github/actions/pull_docker_image
-        with:
-          image: ${{ inputs.docker_image }}
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Download URLs list
-        uses: actions/download-artifact@v4
-        with:
-          name: presigned_urls.json
-          path: ${{ github.workspace }}
-
-      - name: Clone lava job render scripts
-        run: cd .. && git clone https://github.com/qualcomm-linux/job_render
-
-      - name: Create lava job definition
-        uses: ./.github/actions/lava_job_render
-        id: create_job_definition
-        with:
-          docker_image: ${{ inputs.docker_image }}
-
-      - name: Submit lava job
-        id: submit_job
-        run: |
-          cd ../job_render
-          job_id=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs submit ./renders/lava_job_definition.yaml")
-          job_url="https://lava-oss.qualcomm.com/scheduler/job/$job_id"
-          echo "job_id=$job_id" >> $GITHUB_OUTPUT
-          echo "job_url=$job_url" >> $GITHUB_OUTPUT
-          echo "Lava Job: $job_url"
-          echo "JOB_ID=$job_id" >> $GITHUB_ENV
-
-      - name: Check lava job results
-        id: check_job
-        run: |
-          STATE=""
-          while [ "$STATE" != "Finished" ]; do
-            state=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep state)
-            STATE=$(echo "$state" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
-            echo "Current status: $STATE"
-            sleep 30
-          done
-          health=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep Health)
-          HEALTH=$(echo "$health" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
-          if [[ "$HEALTH" == "Complete" ]]; then
-            echo "Lava job passed."
-            summary=":heavy_check_mark: Lava job passed."
-            echo "summary=$summary" >> $GITHUB_OUTPUT
-            exit 0
-          else
-            echo "Lava job failed."
-            summary=":x: Lava job failed."
-            echo "summary=$summary" >> $GITHUB_OUTPUT
-            exit 1
-          fi
-
-      - name: Update summary
-        if: success() || failure()
-        shell: bash
-        run: |
-          if [ "${{ steps.create_job_definition.conclusion }}" == 'failure' ]; then
-            status=":x: Test job failed"
-          else
-            status="${{ steps.check_job.outputs.summary }}"
-            job_url="${{ steps.submit_job.outputs.job_url }}"
-            job_id="${{ steps.submit_job.outputs.job_id }}"
-          fi
-          SUMMARY='
-          <br>'${status}'
-          <br>
-          JOB ID: '${job_id}'
-          <br>
-          '
+name: _test
+description: Run tests on LAVA
+
+on:
+  workflow_call:
+    inputs:
+      docker_image:
+        description: Docker image
+        type: string
+        required: true
+        default: kmake-image:latest
+
+jobs:
+  test:
+    runs-on:
+      group: GHA-Kernel-SelfHosted-RG
+      labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ]
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          ref: ${{ github.ref }}
+          fetch-depth: 0
+
+      - name: Pull docker image
+        uses: ./.github/actions/pull_docker_image
+        with:
+          image: ${{ inputs.docker_image }}
+          github_token: ${{ secrets.GITHUB_TOKEN }}
+
+      - name: Download URLs list
+        uses: actions/download-artifact@v4
+        with:
+          name: presigned_urls.json
+          path: ${{ github.workspace }}
+
+      - name: Clone lava job render scripts
+        run: cd .. && git clone https://github.com/qualcomm-linux/job_render
+
+      - name: Create lava job definition
+        uses: ./.github/actions/lava_job_render
+        id: create_job_definition
+        with:
+          docker_image: ${{ inputs.docker_image }}
+
+      - name: Submit lava job
+        id: submit_job
+        run: |
+          cd ../job_render
+          job_id=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs submit ./renders/lava_job_definition.yaml")
+          job_url="https://lava-oss.qualcomm.com/scheduler/job/$job_id"
+          echo "job_id=$job_id" >> $GITHUB_OUTPUT
+          echo "job_url=$job_url" >> $GITHUB_OUTPUT
+          echo "Lava Job: $job_url"
+          echo "JOB_ID=$job_id" >> $GITHUB_ENV
+
+      - name: Check lava job results
+        id: check_job
+        run: |
+          STATE=""
+          while [ "$STATE" != "Finished" ]; do
+            state=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep state)
+            STATE=$(echo "$state" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
+            echo "Current status: $STATE"
+            sleep 30
+          done
+          health=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep Health)
+          HEALTH=$(echo "$health" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
+          if [[ "$HEALTH" == "Complete" ]]; then
+            echo "Lava job passed."
+            summary=":heavy_check_mark: Lava job passed."
+            echo "summary=$summary" >> $GITHUB_OUTPUT
+            exit 0
+          else
+            echo "Lava job failed."
+            summary=":x: Lava job failed."
+            echo "summary=$summary" >> $GITHUB_OUTPUT
+            exit 1
+          fi
+
+      - name: Update summary
+        if: success() || failure()
+        shell: bash
+        run: |
+          if [ "${{ steps.create_job_definition.conclusion }}" == 'failure' ]; then
+            status=":x: Test job failed"
+          else
+            status="${{ steps.check_job.outputs.summary }}"
+            job_url="${{ steps.submit_job.outputs.job_url }}"
+            job_id="${{ steps.submit_job.outputs.job_id }}"
+          fi
+          SUMMARY='
+          <br>'${status}'
+          <br>
+          JOB ID: '${job_id}'
+          <br>
+          '
           echo -e "$SUMMARY" >> $GITHUB_STEP_SUMMARY
\ No newline at end of file

From 6924ecfc29968d9d3f698c141958b4530b01ee74 Mon Sep 17 00:00:00 2001
From: Vishal Kumar
Date: Tue, 27 May 2025 14:53:03 +0530
Subject: [PATCH 10/14] ci: Reusable workflow for sync

Reusable workflow for syncing the kernel and kernel-topics workspaces.

Kernel - only checks out the code with the PR data.

Kernel topics - clones the kernel repo and the automerge tool, creates
a merge.conf containing the git remotes of qcom-next and the topic
branch, then fetches the PR data and merges it on top of the
integration branch.

Signed-off-by: Vishal Kumar
---
 .github/actions/build/action.yml |  9 +++-
 .github/actions/sync/action.yml  | 84 ++++++++++++++++++++++++++++++++
 .github/workflows/build.yml      | 17 ++++---
 3 files changed, 101 insertions(+), 9 deletions(-)
 create mode 100644 .github/actions/sync/action.yml

diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml
index c998e4d418cf9..809183151550f 100644
--- a/.github/actions/build/action.yml
+++ b/.github/actions/build/action.yml
@@ -6,6 +6,9 @@ inputs:
     description: Docker image
     required: true
     default: kmake-image:latest
+  workspace_path:
+    description: Workspace path
+    required: true
 
 runs:
   using: "composite"
@@ -13,6 +16,7 @@ runs:
     - name: Download artifacts
       shell: bash
       run: |
+        cd ${{ inputs.workspace_path }}
         mkdir -p ../artifacts && \
         wget -O ../artifacts/ramdisk.gz https://snapshots.linaro.org/member-builds/qcomlt/testimages/arm64/1379/initramfs-test-image-qemuarm64-20230321073831-1379.rootfs.cpio.gz && \
         wget -O ../artifacts/systemd-boot-efi.deb http://ports.ubuntu.com/pool/universe/s/systemd/systemd-boot-efi_255.4-1ubuntu8_arm64.deb && \
         dpkg-deb -xv ../artifacts/systemd-boot-efi.deb ../artifacts/systemd
 
     - name: Make
       shell: bash
       run: |
+        cd ${{ inputs.workspace_path }}
         docker run -i --rm \
           --user $(id -u):$(id -g) \
           --workdir="$PWD" \
           -v "$(dirname $PWD)":"$(dirname $PWD)" \
           ${{ inputs.docker_image }} bash -c "
           make O=../kobj defconfig
           make O=../kobj -j$(nproc)
           make O=../kobj -j$(nproc) dir-pkg INSTALL_MOD_STRIP=1
           "
 
     - name: Package DLKM into ramdisk
       shell: bash
       run: |
-        (cd ../kobj/tar-install ; find lib/modules | cpio -o -H newc -R +0:+0 | gzip -9 >> ../../artifacts/ramdisk.gz)
\ No newline at end of file
+        cd ${{ inputs.workspace_path }}
+        (cd ../kobj/tar-install ; find lib/modules | cpio -o -H newc -R +0:+0 | gzip -9 >> ../../artifacts/ramdisk.gz)
+
diff --git a/.github/actions/sync/action.yml b/.github/actions/sync/action.yml
new file mode 100644
index 0000000000000..f3c852977500a
--- /dev/null
+++ b/.github/actions/sync/action.yml
@@ -0,0 +1,84 @@
+name: Sync workspace
+
+inputs:
+  base_branch:
+    description: Base branch
+    required: true
+    default: qcom-next-staging
+  pr_number:
+    description: PR number
+    required: false
+
+outputs:
+  workspace_path:
+    description: Sync workspace path
+    value: ${{ steps.set-workspace.outputs.workspace }}
+
+runs:
+  using: "composite"
+  steps:
+    - name: Checkout PR branch
+      if: inputs.base_branch == 'qcom-next-staging'
+      uses: actions/checkout@v4
+      shell: bash
+      with:
+        fetch-depth: 0
+
+    - name: Configure git
+      shell: bash
+      run: |
+        git config --global user.name "github-actions"
+        git config --global user.email "github-actions@github.com"
+
+    - name: Sync with latest changes
+      if: inputs.base_branch == 'qcom-next-staging'
+      shell: bash
+      run: |
+        set -e
+        echo "Syncing with latest changes..."
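+        # Fetch the latest base branch and merge it into the PR head so the
+        # build tests the post-merge state of the tree, not the stale branch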
+        git fetch origin ${{ inputs.base_branch }}
+        git merge --no-ff origin/${{ inputs.base_branch }}
+
+    - name: Clone repositories
+      if: inputs.base_branch != 'qcom-next-staging'
+      shell: bash
+      run: |
+        git clone https://github.com/qualcomm-linux/kernel.git
+        git clone https://github.com/qualcomm-linux/automerge.git
+
+    - name: Create merge configuration
+      if: inputs.base_branch != 'qcom-next-staging'
+      shell: bash
+      run: |
+        TOPIC_BRANCH=${{ inputs.base_branch }}
+        cat <<EOF > merge.conf
+        baseline https://github.com/qualcomm-linux/kernel.git qcom-next
+        topic https://github.com/qualcomm-linux/kernel-topics.git $TOPIC_BRANCH
+        EOF
+        echo "File 'merge.conf' created successfully."
+
+    - name: Run auto merge
+      id: automerge
+      if: inputs.base_branch != 'qcom-next-staging'
+      shell: bash
+      run: |
+        cd kernel
+        ../automerge/ci-merge -f ../merge.conf -t head -n
+
+    - name: Fetch PR
+      if: inputs.base_branch != 'qcom-next-staging'
+      shell: bash
+      run: |
+        cd kernel
+        git fetch https://github.com/qualcomm-linux/kernel-topics.git pull/${{inputs.pr_number}}/head:pr-${{inputs.pr_number}}
+        git merge pr-${{inputs.pr_number}} --no-commit
+        git commit -m "Merged PR ${{inputs.pr_number}}"
+
+    - name: Set workspace path
+      id: set-workspace
+      shell: bash
+      run: |
+        if [[ "${{ inputs.base_branch }}" == "qcom-next-staging" ]]; then
+          echo "workspace=${{ github.workspace }}" >> "$GITHUB_OUTPUT"
+        else
+          echo "workspace=${{ github.workspace }}/kernel" >> "$GITHUB_OUTPUT"
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index ad565b476c3a1..93989491dd61c 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -13,11 +13,12 @@ jobs:
       group: GHA-Kernel-SelfHosted-RG
       labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ]
     steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
+      - name: Sync codebase
+        id: sync
+        uses: ./.github/actions/sync
         with:
-          ref: ${{ github.ref }}
-          fetch-depth: 0
+          base_branch: ${{ github.base_ref }}
+          pr_number: ${{ github.event.pull_request.number }}
 
       - name: Pull docker image
         uses: ./.github/actions/pull_docker_image
@@ -30,9 +31,11 @@ jobs:
         uses: ./.github/actions/build
         with:
           docker_image: ${{ inputs.docker_image }}
+          workspace_path: ${{ steps.sync.outputs.workspace_path }}
 
       - name: Create file list for artifacts upload
         run: |
+          cd ${{ steps.sync.outputs.workspace_path }}
           touch ../artifacts/file_list.txt
           tar -cJf modules.tar.xz ../kobj/tar-install/lib/modules/
           echo "modules.tar.xz" >> ../artifacts/file_list.txt
@@ -44,13 +47,12 @@ jobs:
         uses: ./.github/actions/aws_s3_helper
         with:
           s3_bucket: qli-prd-kernel-gh-artifacts
-          aws_access_key_id: ${{ secrets.AWSKEYID }}
-          aws_secret_access_key: ${{ secrets.AWSACCESSKEY }}
-          local_file: ../artifacts/file_list.txt
+          local_file: ${{ steps.sync.outputs.workspace_path }}/../artifacts/file_list.txt
           mode: multi-upload
 
       - name: Clean up
         run: |
+          cd ${{ steps.sync.outputs.workspace_path }}
           rm -rf ../artifacts
           rm -rf ../kobj
           rm -rf modules.tar.xz
@@ -68,7 +70,6 @@ jobs:
           fi
           SUMMARY='
           <br>Build Summary
-
           '${summary}'
           <br>
           '

From 0843415a347fd7277c5b4733c74013ec6da26b5c Mon Sep 17 00:00:00 2001
From: VISHAL KUMAR
Date: Wed, 28 May 2025 15:32:17 +0530
Subject: [PATCH 11/14] ci: Fix: enable pre_merge CI workflow for all branches
 (#16)

The pre-merge process should be utilized for kernel-topics as well and
must be accessible across all branches. Additionally, the `shell: bash`
key has been removed from the checkout step, where it is not valid.

Signed-off-by: Vishal Kumar
---
 .github/actions/sync/action.yml | 10 +++++++++-
 .github/workflows/build.yml     |  5 +++++
 .github/workflows/pre_merge.yml |  2 --
 3 files changed, 14 insertions(+), 3 deletions(-)

diff --git a/.github/actions/sync/action.yml b/.github/actions/sync/action.yml
index f3c852977500a..b10fb2cd27580 100644
--- a/.github/actions/sync/action.yml
+++ b/.github/actions/sync/action.yml
@@ -17,10 +17,16 @@ outputs:
 runs:
   using: "composite"
   steps:
+    - name: Clean workspace
+      shell: bash
+      run: |
+        echo "Cleaning up workspace..."
+        rm -rf ${{ github.workspace }}/*
+        echo "Workspace cleaned successfully!"
+
     - name: Checkout PR branch
       if: inputs.base_branch == 'qcom-next-staging'
       uses: actions/checkout@v4
-      shell: bash
       with:
         fetch-depth: 0
 
@@ -82,3 +88,5 @@ runs:
           echo "workspace=${{ github.workspace }}" >> "$GITHUB_OUTPUT"
         else
           echo "workspace=${{ github.workspace }}/kernel" >> "$GITHUB_OUTPUT"
+        fi
+
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 93989491dd61c..55028998299a7 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -13,6 +13,11 @@ jobs:
       group: GHA-Kernel-SelfHosted-RG
       labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ]
     steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
       - name: Sync codebase
         id: sync
         uses: ./.github/actions/sync
diff --git a/.github/workflows/pre_merge.yml b/.github/workflows/pre_merge.yml
index 0bca8e714311f..8914f0f948043 100644
--- a/.github/workflows/pre_merge.yml
+++ b/.github/workflows/pre_merge.yml
@@ -1,8 +1,6 @@
 name: pre_merge
 on:
   pull_request_target:
-    branches:
-      - qcom-next-staging
 
 jobs:
   build:

From 55650dfb22f55b911e182e873666bae01e209af4 Mon Sep 17 00:00:00 2001
From: sgaud-quic
Date: Wed, 28 May 2025 15:32:50 +0530
Subject: [PATCH 12/14] ci: Add test-definition parameter (#17)

Add the test definition as a parameter while generating the job_render
YAML; this enables the test cases that are part of the test definition.
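For example, the render step's generator invocation gains one extra
switch (a sketch: the flag is taken from the diff below, and the rest
of the command line is unchanged from the existing action):

  python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json --KernelCI_PreMerge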
Signed-off-by: Salendarsingh Gaud
---
 .github/actions/lava_job_render/action.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/actions/lava_job_render/action.yml b/.github/actions/lava_job_render/action.yml
index be9b2587685e4..6b954744411ad 100644
--- a/.github/actions/lava_job_render/action.yml
+++ b/.github/actions/lava_job_render/action.yml
@@ -151,4 +151,4 @@ runs:
           sh -c 'export BOOT_METHOD=fastboot && \
           export TARGET=qcs6490-rb3gen2 && \
           export TARGET_DTB=qcs6490-rb3gen2 && \
-          python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json'
+          python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json --KernelCI_PreMerge'
\ No newline at end of file

From 661ecf3ecd0be7994ef7533bed2d24f77939780a Mon Sep 17 00:00:00 2001
From: Vishal Kumar
Date: Fri, 30 May 2025 16:44:01 +0530
Subject: [PATCH 13/14] ci: Cleanup: use pre-merge workflow from kernel-config

Reuse the kernel-config pre-merge workflows.

Signed-off-by: Vishal Kumar
---
 .github/actions/aws_s3_helper/action.yml     |  90 -----------
 .github/actions/build/action.yml             |  44 ------
 .github/actions/lava_job_render/action.yml   | 154 -------------------
 .github/actions/pull_docker_image/action.yml |  26 ----
 .github/actions/sync/action.yml              |  92 -----------
 .github/workflows/build.yml                  |  81 ----------
 .github/workflows/pre_merge.yml              |  13 +-
 .github/workflows/test.yml                   |  98 ------------
 8 files changed, 2 insertions(+), 596 deletions(-)
 delete mode 100644 .github/actions/aws_s3_helper/action.yml
 delete mode 100644 .github/actions/build/action.yml
 delete mode 100644 .github/actions/lava_job_render/action.yml
 delete mode 100644 .github/actions/pull_docker_image/action.yml
 delete mode 100644 .github/actions/sync/action.yml
 delete mode 100644 .github/workflows/build.yml
 delete mode 100644 .github/workflows/test.yml

diff --git a/.github/actions/aws_s3_helper/action.yml b/.github/actions/aws_s3_helper/action.yml
deleted file mode 100644
index 5c94170ea1138..0000000000000
--- a/.github/actions/aws_s3_helper/action.yml
+++ /dev/null
@@ -1,90 +0,0 @@
-name: AWS S3 Helper
-description: Upload and download files from AWS S3
-
-inputs:
-  s3_bucket:
-    description: S3 Bucket Name
-    required: true
-  local_file:
-    description: Local file paths
-    required: false
-    default: ../artifacts/file_list.txt
-  download_file:
-    description: Download file paths
-    required: false
-    default: ''
-  mode:
-    description: Mode of operation (upload/download)
-    required: true
-    default: single-upload
-
-outputs:
-  presigned_url:
-    description: Pre-signed URL for the uploaded file
-    value: ${{ steps.sync-data.outputs.presigned_url }}
-
-runs:
-  using: "composite"
-  steps:
-    - name: Sync Data
-      id: sync-data
-      shell: bash
-      env:
-        UPLOAD_LOCATION: ${{ github.repository_owner }}/${{ github.event.repository.name }}/${{ github.workflow }}/${{ github.head_ref != '' && github.head_ref || github.run_id }}/
-      run: |
-        echo "::group::$(printf '__________ %-100s' 'Process' | tr ' ' _)"
-        case "${{ inputs.mode }}" in
-          multi-upload)
-            echo "Uploading files to S3 bucket..."
-            first_line=true
-            # Start the JSON object
-            echo "{" > ${{ github.workspace }}/presigned_urls.json
-            while IFS= read -r file; do
-              if [ -f "$file" ]; then
-                echo "Uploading $file..."
-                aws s3 cp "$file" s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}
-                echo "Uploaded $file to s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}"
-                echo "Creating Pre-signed URL for $file..."
-                filename=$(basename "$file")
-                presigned_url=$(aws s3 presign s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}$filename --expires-in 3600)
-                if [ "$first_line" = true ]; then
-                  first_line=false
-                else
-                  echo "," >> ${{ github.workspace }}/presigned_urls.json
-                fi
-                # Append the pre-signed URL to the file
-                echo " \"${file}\": \"${presigned_url}\"" >> ${{ github.workspace }}/presigned_urls.json
-                echo "Pre-signed URL for $file: $presigned_url"
-              else
-                echo "Warning: $file does not exist or is not a regular file."
-              fi
-            done < "${{ inputs.local_file }}"
-            # Close the JSON object
-            echo "}" >> ${{ github.workspace }}/presigned_urls.json
-            ;;
-          single-upload)
-            echo "Uploading single file to S3 bucket..."
-            aws s3 cp "${{ inputs.local_file }}" s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}
-            echo "Uploaded ${{ inputs.local_file }} to s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}"
-            echo "Creating Pre-signed URL for ${{ inputs.local_file }}..."
-            presigned_url=$(aws s3 presign s3://${{ inputs.s3_bucket }}/${{ env.UPLOAD_LOCATION }}${{ inputs.local_file }} --expires-in 3600)
-            echo "presigned_url=${presigned_url}" >> "$GITHUB_OUTPUT"
-            ;;
-          download)
-            #Download The required file from s3
-            echo "Downloading files from S3 bucket..."
-            aws s3 sync s3://${{ inputs.s3_bucket }}/${{ inputs.download_file }} .
-            ;;
-          *)
-            echo "Invalid mode. Use 'upload' or 'download'."
-            exit 1
-            ;;
-        esac
-
-    - name: Upload artifacts
-      if: ${{ inputs.mode == 'multi-upload' }}
-      uses: actions/upload-artifact@v4
-      with:
-        name: presigned_urls.json
-        path: ${{ github.workspace }}/presigned_urls.json
-        retention-days: 1
\ No newline at end of file
diff --git a/.github/actions/build/action.yml b/.github/actions/build/action.yml
deleted file mode 100644
index 809183151550f..0000000000000
--- a/.github/actions/build/action.yml
+++ /dev/null
@@ -1,44 +0,0 @@
-name: Build workspace
-description: Build workspace
-
-inputs:
-  docker_image:
-    description: Docker image
-    required: true
-    default: kmake-image:latest
-  workspace_path:
-    description: Workspace path
-    required: true
-
-runs:
-  using: "composite"
-  steps:
-    - name: Download artifacts
-      shell: bash
-      run: |
-        cd ${{ inputs.workspace_path }}
-        mkdir -p ../artifacts && \
-        wget -O ../artifacts/ramdisk.gz https://snapshots.linaro.org/member-builds/qcomlt/testimages/arm64/1379/initramfs-test-image-qemuarm64-20230321073831-1379.rootfs.cpio.gz && \
-        wget -O ../artifacts/systemd-boot-efi.deb http://ports.ubuntu.com/pool/universe/s/systemd/systemd-boot-efi_255.4-1ubuntu8_arm64.deb && \
-        dpkg-deb -xv ../artifacts/systemd-boot-efi.deb ../artifacts/systemd
-
-    - name: Make
-      shell: bash
-      run: |
-        cd ${{ inputs.workspace_path }}
-        docker run -i --rm \
-          --user $(id -u):$(id -g) \
-          --workdir="$PWD" \
-          -v "$(dirname $PWD)":"$(dirname $PWD)" \
-          ${{ inputs.docker_image }} bash -c "
-          make O=../kobj defconfig
-          make O=../kobj -j$(nproc)
-          make O=../kobj -j$(nproc) dir-pkg INSTALL_MOD_STRIP=1
-          "
-
-    - name: Package DLKM into ramdisk
-      shell: bash
-      run: |
-        cd ${{ inputs.workspace_path }}
-        (cd ../kobj/tar-install ; find lib/modules | cpio -o -H newc -R +0:+0 | gzip -9 >> ../../artifacts/ramdisk.gz)
-
diff --git a/.github/actions/lava_job_render/action.yml b/.github/actions/lava_job_render/action.yml
deleted file mode 100644
index 6b954744411ad..0000000000000
--- a/.github/actions/lava_job_render/action.yml
+++ /dev/null
@@ -1,154 +0,0 @@
-name: Test Action
-inputs:
-  docker_image:
-    description: Docker image
-    required: true
-    default: kmake-image:latest
-
-runs:
-  using: "composite"
-  steps:
-    - name: Process presigned_urls.json
-      id: process_urls
-      uses: actions/github-script@v7
-      with:
-        script: |
-          const fs = require('fs');
-          const p = require('path');
-          // Helper function to find URL by filename
-          function findUrlByFilename(filename) {
-            for (const [path, url] of Object.entries(data)) {
-              if (path.endsWith(filename)) {
-                return url;
-              }
-            }
-            return null;
-          }
-          const filePath = p.join(process.env.GITHUB_WORKSPACE, 'presigned_urls.json');
-          if (fs.existsSync(filePath)) {
-            console.log("File exists");
-          } else {
-            console.log("File does not exist");
-            core.setFailed(`File not found: ${filePath}`);
-          }
-          // Read the JSON file
-          const data = JSON.parse(fs.readFileSync(filePath, 'utf-8'));
-          // Extract URLs into variables
-          const modulesTarUrl = findUrlByFilename('modules.tar.xz');
-          const imageUrl = findUrlByFilename('Image');
-          const vmlinuxUrl = findUrlByFilename('vmlinux');
-          const dtbUrl = findUrlByFilename('qcs6490-rb3gen2.dtb');
-          // Set outputs
-          core.setOutput('modules_url', modulesTarUrl);
-          core.setOutput('image_url', imageUrl);
-          core.setOutput('vmlinux_url', vmlinuxUrl);
-          core.setOutput('dtb_url', dtbUrl);
-          console.log(`Modules URL: ${modulesTarUrl}`);
-          console.log(`Image URL: ${imageUrl}`);
-          console.log(`Vmlinux URL: ${vmlinuxUrl}`);
-          console.log(`Dtb URL: ${dtbUrl}`);
-
-    - name: Create metadata.json
-      id: create_metadata
-      shell: bash
-      run: |
-        echo "Creating job definition"
-        # Create the job definition using the processed URLs
-        cd ../job_render
-        docker run -i --rm \
-          --user "$(id -u):$(id -g)" \
-          --workdir="$PWD" \
-          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
-          -e dtb_url="${{ steps.process_urls.outputs.dtb_url }}" \
-          ${{ inputs.docker_image }} \
-          jq '.artifacts["dtbs/qcom/qcs6490-rb3gen2.dtb"] = env.dtb_url' data/metadata.json > temp.json && mv temp.json data/metadata.json
-
-    - name: Upload metadata.json
-      id: upload_metadata
-      uses: ./.github/actions/aws_s3_helper
-      with:
-        local_file: ../job_render/data/metadata.json
-        s3_bucket: qli-prd-kernel-gh-artifacts
-        mode: single-upload
-
-    - name: Create template json
-      shell: bash
-      run: |
-        echo "Creating job definition"
-        metadata_url="${{ steps.upload_metadata.outputs.presigned_url }}"
-        vmlinux_url="${{ steps.process_urls.outputs.vmlinux_url }}"
-        image_url="${{ steps.process_urls.outputs.image_url }}"
-        modules_url="${{ steps.process_urls.outputs.modules_url }}"
-        # Create the job definition using the processed URLs
-        cd ../job_render
-        # using metadata_url
-        docker run -i --rm \
-          --user "$(id -u):$(id -g)" \
-          --workdir="$PWD" \
-          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
-          -e metadata_url="$metadata_url" \
-          ${{ inputs.docker_image }} \
-          jq '.artifacts.metadata = env.metadata_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
-        # using image_url
-        docker run -i --rm \
-          --user "$(id -u):$(id -g)" \
-          --workdir="$PWD" \
-          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
-          -e image_url="$image_url" \
-          ${{ inputs.docker_image }} \
-          jq '.artifacts.kernel = env.image_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
-        # using vmlinux_url
-        docker run -i --rm \
-          --user "$(id -u):$(id -g)" \
-          --workdir="$PWD" \
-          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
-          -e vmlinux_url="$vmlinux_url" \
-          ${{ inputs.docker_image }} \
-          jq '.artifacts.vmlinux = env.vmlinux_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
-        # using modules_url
-        docker run -i --rm \
-          --user "$(id -u):$(id -g)" \
-          --workdir="$PWD" \
-          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
-          -e modules_url="$modules_url" \
-          ${{ inputs.docker_image }} \
-          jq '.artifacts.modules = env.modules_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
-
-    - name: Update firmware and ramdisk
-      shell: bash
-      run: |
-        cd ../job_render
-        ramdisk_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-kerneltest-full-image-qcom-armv8a.cpio.gz --expires-in 7600)"
-        firmware_url="$(aws s3 presign s3://qli-prd-kernel-gh-artifacts/meta-qcom/initramfs-firmware-rb3gen2-image-qcom-armv8a.cpio.gz --expires-in 7600)"
-        # using ramdisk_url
-        docker run -i --rm \
-          --user "$(id -u):$(id -g)" \
-          --workdir="$PWD" \
-          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
-          -e ramdisk_url="$ramdisk_url" \
-          ${{ inputs.docker_image }} \
-          jq '.artifacts.ramdisk = env.ramdisk_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
-
-        # using firmware_url
-        docker run -i --rm \
-          --user "$(id -u):$(id -g)" \
-          --workdir="$PWD" \
-          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
-          -e firmware_url="$firmware_url" \
-          ${{ inputs.docker_image }} \
-          jq '.artifacts.firmware = env.firmware_url' data/cloudData.json > temp.json && mv temp.json data/cloudData.json
-
-    - name: Create lava_job_definition
-      shell: bash
-      run: |
-        cd ../job_render
-        mkdir renders
-        docker run -i --rm \
-          --user "$(id -u):$(id -g)" \
-          --workdir="$PWD" \
-          -v "$(dirname "$PWD")":"$(dirname "$PWD")" \
-          ${{ inputs.docker_image }} \
-          sh -c 'export BOOT_METHOD=fastboot && \
-          export TARGET=qcs6490-rb3gen2 && \
-          export TARGET_DTB=qcs6490-rb3gen2 && \
-          python3 lava_Job_definition_generator.py --localjson ./data/cloudData.json --KernelCI_PreMerge'
\ No newline at end of file
diff --git a/.github/actions/pull_docker_image/action.yml b/.github/actions/pull_docker_image/action.yml
deleted file mode 100644
index 9618615b02cf9..0000000000000
--- a/.github/actions/pull_docker_image/action.yml
+++ /dev/null
@@ -1,26 +0,0 @@
-name: Pull docker image
-description: Pull docker image
-
-inputs:
-  image:
-    description: The docker image to pull
-    required: true
-    default: kmake-image:latest
-
-  github_token:
-    description: The GitHub token to use for authentication
-    required: true
-
-runs:
-  using: "composite"
-  steps:
-    - name: Clone kmake-image
-      shell: bash
-      run: |
-        git clone https://github.com/qualcomm-linux/kmake-image.git
-
-    - name: Build docker image
-      shell: bash
-      run: |
-        cd kmake-image
-        docker build . -t kmake-image
diff --git a/.github/actions/sync/action.yml b/.github/actions/sync/action.yml
deleted file mode 100644
index b10fb2cd27580..0000000000000
--- a/.github/actions/sync/action.yml
+++ /dev/null
@@ -1,92 +0,0 @@
-name: Sync workspace
-
-inputs:
-  base_branch:
-    description: Base branch
-    required: true
-    default: qcom-next-staging
-  pr_number:
-    description: PR number
-    required: false
-
-outputs:
-  workspace_path:
-    description: Sync workspace path
-    value: ${{ steps.set-workspace.outputs.workspace }}
-
-runs:
-  using: "composite"
-  steps:
-    - name: Clean workspace
-      shell: bash
-      run: |
-        echo "Cleaning up workspace..."
-        rm -rf ${{ github.workspace }}/*
-        echo "Workspace cleaned successfully!"
-
-    - name: Checkout PR branch
-      if: inputs.base_branch == 'qcom-next-staging'
-      uses: actions/checkout@v4
-      with:
-        fetch-depth: 0
-
-    - name: Configure git
-      shell: bash
-      run: |
-        git config --global user.name "github-actions"
-        git config --global user.email "github-actions@github.com"
-
-    - name: Sync with latest changes
-      if: inputs.base_branch == 'qcom-next-staging'
-      shell: bash
-      run: |
-        set -e
-        echo "Syncing with latest changes..."
-        # Fetch the latest base branch and merge it into the PR head so the
-        # build tests the post-merge state of the tree, not the stale branch
-        git fetch origin ${{ inputs.base_branch }}
-        git merge --no-ff origin/${{ inputs.base_branch }}
-
-    - name: Clone repositories
-      if: inputs.base_branch != 'qcom-next-staging'
-      shell: bash
-      run: |
-        git clone https://github.com/qualcomm-linux/kernel.git
-        git clone https://github.com/qualcomm-linux/automerge.git
-
-    - name: Create merge configuration
-      if: inputs.base_branch != 'qcom-next-staging'
-      shell: bash
-      run: |
-        TOPIC_BRANCH=${{ inputs.base_branch }}
-        cat <<EOF > merge.conf
-        baseline https://github.com/qualcomm-linux/kernel.git qcom-next
-        topic https://github.com/qualcomm-linux/kernel-topics.git $TOPIC_BRANCH
-        EOF
-        echo "File 'merge.conf' created successfully."
-
-    - name: Run auto merge
-      id: automerge
-      if: inputs.base_branch != 'qcom-next-staging'
-      shell: bash
-      run: |
-        cd kernel
-        ../automerge/ci-merge -f ../merge.conf -t head -n
-
-    - name: Fetch PR
-      if: inputs.base_branch != 'qcom-next-staging'
-      shell: bash
-      run: |
-        cd kernel
-        git fetch https://github.com/qualcomm-linux/kernel-topics.git pull/${{inputs.pr_number}}/head:pr-${{inputs.pr_number}}
-        git merge pr-${{inputs.pr_number}} --no-commit
-        git commit -m "Merged PR ${{inputs.pr_number}}"
-
-    - name: Set workspace path
-      id: set-workspace
-      shell: bash
-      run: |
-        if [[ "${{ inputs.base_branch }}" == "qcom-next-staging" ]]; then
-          echo "workspace=${{ github.workspace }}" >> "$GITHUB_OUTPUT"
-        else
-          echo "workspace=${{ github.workspace }}/kernel" >> "$GITHUB_OUTPUT"
-        fi
-
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
deleted file mode 100644
index 55028998299a7..0000000000000
--- a/.github/workflows/build.yml
+++ /dev/null
@@ -1,81 +0,0 @@
-name: _build
-on:
-  workflow_call:
-    inputs:
-      docker_image:
-        description: Docker image
-        type: string
-        required: true
-
-jobs:
-  build:
-    runs-on:
-      group: GHA-Kernel-SelfHosted-RG
-      labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ]
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Sync codebase
-        id: sync
-        uses: ./.github/actions/sync
-        with:
-          base_branch: ${{ github.base_ref }}
-          pr_number: ${{ github.event.pull_request.number }}
-
-      - name: Pull docker image
-        uses: ./.github/actions/pull_docker_image
-        with:
-          image: ${{ inputs.docker_image }}
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Build workspace
-        id: build_workspace
-        uses: ./.github/actions/build
-        with:
-          docker_image: ${{ inputs.docker_image }}
-          workspace_path: ${{ steps.sync.outputs.workspace_path }}
-
-      - name: Create file list for artifacts upload
-        run: |
-          cd ${{ steps.sync.outputs.workspace_path }}
-          touch ../artifacts/file_list.txt
-          tar -cJf modules.tar.xz ../kobj/tar-install/lib/modules/
-          echo "modules.tar.xz" >> ../artifacts/file_list.txt
-          echo "../kobj/arch/arm64/boot/Image" >> ../artifacts/file_list.txt
-          echo "../kobj/vmlinux" >> ../artifacts/file_list.txt
-          echo "../kobj/arch/arm64/boot/dts/qcom/qcs6490-rb3gen2.dtb" >> ../artifacts/file_list.txt
-
-      - name: Upload artifacts
-        uses: ./.github/actions/aws_s3_helper
-        with:
-          s3_bucket: qli-prd-kernel-gh-artifacts
-          local_file: ${{ steps.sync.outputs.workspace_path }}/../artifacts/file_list.txt
-          mode: multi-upload
-
-      - name: Clean up
-        run: |
-          cd ${{ steps.sync.outputs.workspace_path }}
-          rm -rf ../artifacts
-          rm -rf ../kobj
-          rm -rf modules.tar.xz
-
-      - name: Update summary
-        if: success() || failure()
-        shell: bash
-        run: |
-          if [ ${{ steps.build_workspace.outcome }} == 'success' ]; then
-            echo "Build was successful"
-            summary=":heavy_check_mark: Build Success"
-          else
-            echo "Build failed"
-            summary=":x: Build Failed"
-          fi
-          SUMMARY='
-          <br>Build Summary
-          '${summary}'
-          <br>
-          '
-          echo -e "$SUMMARY" >> $GITHUB_STEP_SUMMARY
diff --git a/.github/workflows/pre_merge.yml b/.github/workflows/pre_merge.yml
index 8914f0f948043..0fa65d88abaeb 100644
--- a/.github/workflows/pre_merge.yml
+++ b/.github/workflows/pre_merge.yml
@@ -3,15 +3,6 @@ on:
   pull_request_target:
 
 jobs:
-  build:
-    uses: ./.github/workflows/build.yml
+  pre-merge:
+    uses: qualcomm-linux/kernel-config/.github/workflows/pre_merge.yml@main
     secrets: inherit
-    with:
-      docker_image: kmake-image:latest
-
-  test:
-    needs: [build]
-    uses: ./.github/workflows/test.yml
-    secrets: inherit
-    with:
-      docker_image: kmake-image:latest
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
deleted file mode 100644
index 669e35beb347f..0000000000000
--- a/.github/workflows/test.yml
+++ /dev/null
@@ -1,98 +0,0 @@
-name: _test
-description: Run tests on LAVA
-
-on:
-  workflow_call:
-    inputs:
-      docker_image:
-        description: Docker image
-        type: string
-        required: true
-        default: kmake-image:latest
-
-jobs:
-  test:
-    runs-on:
-      group: GHA-Kernel-SelfHosted-RG
-      labels: [ self-hosted, kernel-prd-u2404-x64-large-od-ephem ]
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          ref: ${{ github.ref }}
-          fetch-depth: 0
-
-      - name: Pull docker image
-        uses: ./.github/actions/pull_docker_image
-        with:
-          image: ${{ inputs.docker_image }}
-          github_token: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Download URLs list
-        uses: actions/download-artifact@v4
-        with:
-          name: presigned_urls.json
-          path: ${{ github.workspace }}
-
-      - name: Clone lava job render scripts
-        run: cd .. && git clone https://github.com/qualcomm-linux/job_render
-
-      - name: Create lava job definition
-        uses: ./.github/actions/lava_job_render
-        id: create_job_definition
-        with:
-          docker_image: ${{ inputs.docker_image }}
-
-      - name: Submit lava job
-        id: submit_job
-        run: |
-          cd ../job_render
-          job_id=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs submit ./renders/lava_job_definition.yaml")
-          job_url="https://lava-oss.qualcomm.com/scheduler/job/$job_id"
-          echo "job_id=$job_id" >> $GITHUB_OUTPUT
-          echo "job_url=$job_url" >> $GITHUB_OUTPUT
-          echo "Lava Job: $job_url"
-          echo "JOB_ID=$job_id" >> $GITHUB_ENV
-
-      - name: Check lava job results
-        id: check_job
-        run: |
-          STATE=""
-          while [ "$STATE" != "Finished" ]; do
-            state=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep state)
-            STATE=$(echo "$state" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
-            echo "Current status: $STATE"
-            sleep 30
-          done
-          health=$(docker run -i --rm --workdir="$PWD" -v "$(dirname $PWD)":"$(dirname $PWD)" ${{ inputs.docker_image }} sh -c "lavacli identities add --token ${{secrets.LAVA_OSS_TOKEN}} --uri https://lava-oss.qualcomm.com/RPC2 --username ${{secrets.LAVA_OSS_USER}} production && lavacli -i production jobs show $JOB_ID" | grep Health)
-          HEALTH=$(echo "$health" | cut -d':' -f2 | sed 's/^ *//;s/ *$//')
-          if [[ "$HEALTH" == "Complete" ]]; then
-            echo "Lava job passed."
-            summary=":heavy_check_mark: Lava job passed."
-            echo "summary=$summary" >> $GITHUB_OUTPUT
-            exit 0
-          else
-            echo "Lava job failed."
-            summary=":x: Lava job failed."
-            echo "summary=$summary" >> $GITHUB_OUTPUT
-            exit 1
-          fi
-
-      - name: Update summary
-        if: success() || failure()
-        shell: bash
-        run: |
-          if [ "${{ steps.create_job_definition.conclusion }}" == 'failure' ]; then
-            status=":x: Test job failed"
-          else
-            status="${{ steps.check_job.outputs.summary }}"
-            job_url="${{ steps.submit_job.outputs.job_url }}"
-            job_id="${{ steps.submit_job.outputs.job_id }}"
-          fi
-          SUMMARY='
-          <br>'${status}'
-          <br>
-          JOB ID: '${job_id}'
-          <br>
-          '
-          echo -e "$SUMMARY" >> $GITHUB_STEP_SUMMARY
\ No newline at end of file

From 637bf4d086a8bab17a33842f42853d42e7e676e9 Mon Sep 17 00:00:00 2001
From: sgaud-quic
Date: Tue, 3 Jun 2025 13:14:57 +0530
Subject: [PATCH 14/14] Update README

Signed-off-by: sgaud-quic
---
 README | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README b/README
index fd903645e6de0..580d09856b34a 100644
--- a/README
+++ b/README
@@ -1,4 +1,4 @@
-Linux kernel
+Test Linux kernel
 ============
 
 There are several guides for kernel developers and users. These guides can