diff --git a/.github/workflows/regression-test-pro.yaml b/.github/workflows/regression-test-pro.yaml new file mode 100644 index 0000000000..6a78f9b2b7 --- /dev/null +++ b/.github/workflows/regression-test-pro.yaml @@ -0,0 +1,40 @@ +name: Regression Test Pro + +on: + push: + branches: + - regression-testing + +jobs: + regression-pro: + name: Regression Test Pro Test + runs-on: self-hosted + + steps: + # - name: Checkout code + # uses: actions/checkout@v4 + # with: + # ref: regression-testing + + # - name: Set sysroot + # run: | + # cp /opt/sysroot.tgz ./sysroot.tgz + # tar -xzf ./sysroot.tgz -C ./ariane + # rm -f ./sysroot.tgz + # working-directory: ./soft + + # - name: Install submodule + # run: ./utils/scripts/submodule_init.sh + + - name: Grant permissions to actions-pipeline + run: chmod +x *.sh + working-directory: ./utils/scripts/actions-pipeline + + - name: Grant permissions to helper + run: chmod +x *.sh + working-directory: ./utils/scripts/actions-pipeline/helper + + - name: Run workflow script + run: | + ./run_workflow.sh + working-directory: ./utils/scripts/actions-pipeline \ No newline at end of file diff --git a/socs/defconfig/esp_xilinx-vc707-xc7vx485t_testing b/socs/defconfig/esp_xilinx-vc707-xc7vx485t_testing new file mode 100644 index 0000000000..07f297c0b4 --- /dev/null +++ b/socs/defconfig/esp_xilinx-vc707-xc7vx485t_testing @@ -0,0 +1,41 @@ +CPU_ARCH = ariane +NCPU_TILE = 1 +CONFIG_HAS_SG = y +CONFIG_NOC_ROWS = 3 +CONFIG_NOC_COLS = 2 +CONFIG_COH_NOC_WIDTH = 64 +CONFIG_DMA_NOC_WIDTH = 64 +CONFIG_MULTICAST_NOC_EN = y +CONFIG_MAX_MCAST_DESTS = 4 +CONFIG_QUEUE_SIZE = 4 +CONFIG_CACHE_EN = y +CONFIG_CACHE_RTL = y +#CONFIG_CACHE_SPANDEX is not set +CONFIG_CPU_CACHES = 512 4 1024 16 +CONFIG_ACC_CACHES = 512 4 +CONFIG_CACHE_LINE_SIZE = 128 +CONFIG_SLM_KBYTES = 256 +#CONFIG_JTAG_EN is not set +CONFIG_ETH_EN = y +#CONFIG_IOLINK_EN is not set +CONFIG_IOLINK_WIDTH = 16 +CONFIG_MEM_LINK_WIDTH = 64 +#CONFIG_SVGA_EN is not set +CONGIG_DSU_IP = C0A80107 
+CONFIG_DSU_ETH = A6A7A0F8043D +CONFIG_CLK_STR = 0 +CONFIG_MON_DDR = y +CONFIG_MON_MEM = y +CONFIG_MON_INJ = y +CONFIG_MON_ROUTERS = y +CONFIG_MON_ACCELERATORS = y +CONFIG_MON_L2 = y +CONFIG_MON_LLC = y +#CONFIG_MON_DVFS is not set +TILE_0_0 = 0 mem mem +TILE_0_1 = 1 cpu cpu +TILE_1_0 = 2 acc FFT_STRATUS basic_fx32_dma64 1 0 sld +TILE_1_1 = 3 misc IO +TILE_2_0 = 4 acc FFT_STRATUS basic_fx32_dma64 0 0 sld +TILE_2_1 = 5 acc FFT_STRATUS basic_fx32_dma64 0 0 sld +#CONFIG_HAS_DVFS is not set diff --git a/socs/xilinx-vc707-xc7vx485t/Makefile b/socs/xilinx-vc707-xc7vx485t/Makefile index bafe0bef84..3a4f4c9793 100644 --- a/socs/xilinx-vc707-xc7vx485t/Makefile +++ b/socs/xilinx-vc707-xc7vx485t/Makefile @@ -44,26 +44,26 @@ TOP_VLOG_SIM_SRCS += ### Xilinx Vivado hw_server ### -FPGA_HOST ?= localhost -XIL_HW_SERVER_PORT ?= 3121 +FPGA_HOST ?= espdev.cs.columbia.edu +XIL_HW_SERVER_PORT ?= 3122 ### Network configuration ### # IP address or host name of the host connected to the FPGA -UART_IP ?= -UART_PORT ?= +UART_IP ?= espdev.cs.columbia.edu +UART_PORT ?= 4322 # SSH IP address or host name of the ESP Linux instance or gateway -SSH_IP ?= -SSH_PORT ?= 22 +SSH_IP ?= 128.59.22.75 +SSH_PORT ?= 5502 # ESPLink IP address or gateway (DO NOT USE HOST NAME) -ESPLINK_IP ?= -ESPLINK_PORT ?= 46392 +ESPLINK_IP ?= 128.59.22.75 +ESPLINK_PORT ?= 46307 # MAC address for Linux if using IP address reservation (e.g.
00aabb33cc77) -# LINUX_MAC ?= +LINUX_MAC ?= 000A3502CB80 ### Include global Makefile ### diff --git a/soft/common/apps/examples/multifft/multifft.c b/soft/common/apps/examples/multifft/multifft.c index 36f994dc88..0d562aface 100644 --- a/soft/common/apps/examples/multifft/multifft.c +++ b/soft/common/apps/examples/multifft/multifft.c @@ -89,8 +89,14 @@ int main(int argc, char **argv) float *gold[3]; token_t *buf[3]; + int *fft_error = (int*)malloc(NACC * sizeof(int)); + int fft_pass_count = 0; + int pass_test_count = 0, total_test_count = 0; + char pass_log[256]; + char fail_log[256]; - const float ERROR_COUNT_TH = 0.01; + const float ERROR_COUNT_TH = 0.05; + const int TESTING_COUNT_TH = 10; int k; @@ -102,6 +108,8 @@ int main(int argc, char **argv) } init_buffer(buf[0], gold[0], false); + memset(pass_log, 0, sizeof(pass_log)); + memset(fail_log, 0, sizeof(fail_log)); printf("\n====== Non coherent DMA ======\n\n"); printf(" .len = %d\n", len); @@ -117,10 +125,16 @@ int main(int argc, char **argv) errors = validate_buffer(&buf[0][out_offset], gold[0]); - if (((float)errors / (float)len) > ERROR_COUNT_TH) - printf(" + TEST FAIL: exceeding error count threshold\n"); - else - printf(" + TEST PASS: not exceeding error count threshold\n"); + if (errors > TESTING_COUNT_TH) { + printf(" + OVERALL TEST RESULT: Non coherent DMA: FAIL (%d/%d)\n", errors, 2 * len); + strcpy(fail_log, "Non coherent DMA, "); + } + else { + printf(" + OVERALL TEST RESULT: Non coherent DMA: PASS (%d/%d)\n", errors, 2 * len); + pass_test_count += 1; + strcpy(pass_log, "Non coherent DMA, "); + } + total_test_count += 1; printf("\n============\n\n"); @@ -141,10 +155,16 @@ int main(int argc, char **argv) errors = validate_buffer(&buf[0][out_offset], gold[0]); - if (((float)errors / (float)len) > ERROR_COUNT_TH) - printf(" + TEST FAIL: exceeding error count threshold\n"); - else - printf(" + TEST PASS: not exceeding error count threshold\n"); + if (errors > TESTING_COUNT_TH) { + printf(" + OVERALL TEST 
RESULT: LLC-coherent DMA: FAIL (%d/%d)\n", errors, 2 * len); + strcat(fail_log, "LLC-coherent DMA, "); + } + else { + printf(" + OVERALL TEST RESULT: LLC-coherent DMA: PASS (%d/%d)\n", errors, 2 * len); + pass_test_count += 1; + strcat(pass_log, "LLC-coherent DMA, "); + } + total_test_count += 1; printf("\n============\n\n"); @@ -165,10 +185,16 @@ int main(int argc, char **argv) errors = validate_buffer(&buf[0][out_offset], gold[0]); - if (((float)errors / (float)len) > ERROR_COUNT_TH) - printf(" + TEST FAIL: exceeding error count threshold\n"); - else - printf(" + TEST PASS: not exceeding error count threshold\n"); + if (errors > TESTING_COUNT_TH) { + printf(" + OVERALL TEST RESULT: Fully-coherent DMA: FAIL (%d/%d)\n", errors, 2 * len); + strcat(fail_log, "Fully-coherent DMA, "); + } + else { + pass_test_count += 1; + printf(" + OVERALL TEST RESULT: Fully-coherent DMA: PASS (%d/%d)\n", errors, 2 * len); + strcat(pass_log, "Fully-coherent DMA, "); + } + total_test_count += 1; printf("\n============\n\n"); @@ -196,12 +222,34 @@ int main(int argc, char **argv) for (k = 0; k < NACC; k++) { errors = validate_buffer(&buf[k][out_offset], gold[k]); + fft_error[k] = errors; - if (((float)errors / (float)(len * NACC)) > ERROR_COUNT_TH) + if (errors > TESTING_COUNT_TH) printf(" + TEST FAIL fft.%d: exceeding error count threshold\n", k); - else + else { printf(" + TEST PASS fft.%d: not exceeding error count threshold\n", k); + fft_pass_count ++; + } + } + + if (fft_pass_count < NACC) { + printf(" + OVERALL TEST RESULT: Concurrent execution: FAIL (%d/%d).", NACC - fft_pass_count, NACC); + strcat(fail_log, "Concurrent execution, "); + } + else { + printf(" + OVERALL TEST RESULT: Concurrent execution: PASS."); + pass_test_count += 1; + strcat(pass_log, "Concurrent execution, "); } + for (k = 0; k < NACC; k++) { + errors = fft_error[k]; + if (errors > TESTING_COUNT_TH) + printf(" fft.%d: FAIL (%d/%d).", k, errors, 2 * len); + else + printf(" fft.%d: PASS (%d/%d).", k, errors, 2 * 
len); + } + total_test_count += 1; + printf("\n"); printf("\n============\n\n"); @@ -225,17 +273,41 @@ int main(int argc, char **argv) errors = validate_buffer(&buf[0][out_offset], gold[0]); - if (((float)errors / (float)(len * NACC)) > ERROR_COUNT_TH) - printf(" + TEST FAIL: exceeding error count threshold\n"); - else - printf(" + TEST PASS: not exceeding error count threshold\n"); + if (errors > TESTING_COUNT_TH) { + // printf(" + TEST FAIL: exceeding error count threshold of %d\n", TESTING_COUNT_TH); + printf(" + OVERALL TEST RESULT: Point-to-point: FAIL (%d/%d)\n", errors, 2 * len); + strcat(fail_log, "Point-to-point Test, "); + } + else { + // printf(" + TEST PASS: not exceeding error count threshold of %d\n", TESTING_COUNT_TH); + printf(" + OVERALL TEST RESULT: Point-to-point: PASS (%d/%d)\n", errors, 2 * len); + pass_test_count += 1; + strcat(pass_log, "Point-to-point Test, "); + } + total_test_count += 1; printf("\n============\n\n"); + /* Overall result */ + strcat(pass_log, "\0"); + strcat(fail_log, "\0"); + + if (pass_test_count == total_test_count) + printf("[PASS] FFT OVERALL TEST RESULT: PASS ALL! \n"); + else { + printf("[FAIL] FFT OVERALL TEST RESULT: FAIL! "); + // detailed result + printf("PASS %d: ", pass_test_count); + printf("%s", pass_log); + printf("FAIL %d: ", total_test_count - pass_test_count); + printf("%s\n", fail_log); + } + for (k = 0; k < NACC; k++) { free(gold[k]); esp_free(buf[k]); } + free(fft_error); return errors; } diff --git a/utils/scripts/actions-pipeline/README.md b/utils/scripts/actions-pipeline/README.md index db02069a65..b3648c7954 100644 --- a/utils/scripts/actions-pipeline/README.md +++ b/utils/scripts/actions-pipeline/README.md @@ -1,107 +1,64 @@ -### Regression Testing - -#### Get Modified Accelerators -This script accepts a configuration file called accelerators.json and uses the repository's Git history to determine modified accelerators. 
The configuration file is simply a JSON file where ESP members can specify what accelerators they wish to test. Imagine that a user makes modifications to many accelerators in a single Pull Request. We may only care about testing a subset of these at a given time. -By default the get_modified_accelerators.sh scripts can run end-to-end tests with each modified accelerator in a 2x2 SoC, but having the configuration file adds extra flexibility to define/override which accelerators are to be tested. - -The script compares the modified files (as specified in the Git history) to the accelerators in the configuration file. It then returns a list of the overlap of accelerators that should be tested, along with references to their HLS and behavioral `make` commands (ie. `make cholesky_stratus-hls` or `make cholesky_stratus-beh`). - -#### Run Simulations -The Run Simulations script accepts a list of testable accelerators via a bash list, and runs HLS for each one. The script first assesses how many accelerators have been scheduled for synthesis. -We consider that in reality a user may only modify one or two accelerators at a time, but designed this project such that if many accelerators are modified in one Pull Request -the program can balance them all effectively across different CPUs on the SOCP server and execute them in parallel. The script iterates through the list of accelerators -and starts and HLS run for each one, asynchronously. Then, it waits for all HLS jobs to return, logging the results for each job as they complete. Once all jobs have -completed, the script exits with success or error code. - -#### Run ESP Config -After the Run Simulations scripts executes, the Run ESP Config script should be executed. Run ESP Config is a bash script that completes the bitstream generation, -FPGA programming, and the test program execution on one of the ESP Xilinx FPGAs. Run ESP Config first looks through the tech/acc folder in the working repository. 
-This directory has all the work folders for the completed HLS jobs. The script iterates through all the successful accelerator HLS jobs and creates an ESP 2x2 SoC configuration -with that accelerator in the accelerator tile. The script then proceeds to run `make esp-config` and `make vivado-syn` to generate the HDL and bitstream for the -full SoC configuration. If this succeeds, the script will continue to run `make fpga-program` to program the generated bitstream on the FPGA, and execute a test -program using `make fpga-run`. The script opens up a minicom connection to the FPGA simultaneously to running `make fpga-run`. The results of the test program -are read out from the minicom to a log file for users to refer to after completion. - -If more than one accelerator has been modified, the script will start a new end-to-end test by creating the ESP configuration, generating HDL, generating the bitstream, -configuring the FPGA and running the test program with the next accelerator, and so on. - - -## Setting Up Pre-push Hook - -To ensure code formatting compliance, we provide a pre-push Git hook that runs before you push changes to the repository. Follow these steps to set it up: - -1. Navigate to the root directory of the repository. -2. Run the setup script to activate the pre-push hook: - ```bash - .githooks/./setup.sh - ``` - or - ```bash - bash .githooks/setup.sh - ``` - This script will automatically copy the pre-push hook to the appropriate location. - -Now, every time you try to push changes to the repository, the pre-push hook will run automatically to check code formatting compliance. - - -## Setting Up Self-Hosted GitHub Runners -GitHub Actions is a tool integrated into the ESP GitHub repository that consists of YAML workflows that can be triggered by different git operations such as push or pull request. -Normally, GitHub Actions is executed on a GitHub-hosted server and can be operated at no-cost with a limit on the number of workflow runs. 
-This is useful for many applications but for ESP, we have a variety of licensed tools that are used for HLS and other procedures which are not easily accessed via GitHub-hosted servers. -Instead, it makes sense for ESP to run regression workflows on the SOCP servers for more control and flexibility. For this, GitHub offers self-hosted runners which are easily configurable -via a downloadable executable program. The resources below explain how a self-hosted runner can be created and configured for ESP. - -For testing, I set up the actions-runner program under the ma4107 user in socp01, then executed the `run.sh` script within the project to enable the tool to listen for GitHub Actions -jobs. - -### Resources +# Regression Testing + +This PR introduces a regression testing suite for ESP. The main goals are: +1. Automate the process of testing a SoC design configuration on ESP. +2. Use GitHub Actions and Workflows to streamline the testing process. + +## Workflow overview +Given an ESP SoC design configuration, the automated process includes the following steps: + +Generate the bitstream. +Upload the bitstream to the target FPGA. +Run a baremetal “Hello” program. +Generate a Linux image for the SoC design. +Boot Linux on the FPGA. +Verify the boot result. +SSH into the booted system. +Execute the `multifft` application. + +The scripts developed in this work are under: +- `utils/scripts/actions-pipeline` +- `.github/workflows/regression-test-pro.yaml` +- `soft/common/apps/examples/multifft/multifft.c` + +*For a more comprehensive report, consult the SLD team or Professor Carloni. + +## Two Methods to Use This Testing Suite +1. Manual Script Execution +2. Rely on GitHub Runner + +## Method 1. Manual Script Execution +### Detail steps for execution +1. Set up ESP environment (sysroot, submodules, etc.) +2. Grant permission to scripts in this testing suite +3. Specify the target ESP config to run the flow. 
Modify the config in `utils/scripts/actions-pipeline/esp_configs.json`. Specify FPGA name to connect, path of ESP config to test, path to save the result log, UART and SSH credentials. +4. Optional: Start a tmux session before running following steps. Some steps within the flow takes 1-3 hours. +5. Navigate to the scripts directory: `cd esp/utils/scripts/actions-pipeline` +6. Generate bitstream file for the testing target. It will generate bitstream for first ESP config that you specified in step 3. Execute `./helper/gen_bitstream.sh` +7. Generate Linux image file for the testing target. Execute `./helper/gen_linux.sh` +8. Run the main flow that uses the files generated from step 6 and 7. Execute `./run_workflow.sh` +9. The result output log file will be saved in the path specified in `esp_configs.json` + +This method is stable and reliable for testing ESP configurations. + +## Method 2. GitHub Runner + Automated Workflow +### Design the workflow +The workflow is designed in a yaml file, located in `.github/workflows/regression-test-pro.yaml`. Now it is set as "triggered by push," which means that if the runner is running, when there's a push onto the specified branch, the actions listed in this workflow will be executed. Please modify as needed. + +### Detail steps for execution +1. Design the workflow in the YAML file with specification. Previous workflows are in `.github/workflows/regression-test-pro.yaml`. +2. Follow the instruction from GitHub to setup a worker. To whom from SLD Columbia to work on developing this in the future, I set the worker on `server thecaptain.cs.columbia.edu`, consult SLD for accessing the server. - [Adding self-hosted runners to the repository](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/adding-self-hosted-runners) - [Configuring and starting the runner](https://docs.github.com/en/actions/hosting-your-own-runners/managing-self-hosted-runners/adding-self-hosted-runners) +3. 
Start the runner +4. Trigger the workflow: After the runner is active, make code changes and `git push` (or whatever specified in workflow) to execute the workflow automatically. +⚠️ Disclaimer: This method is currently unstable. Tools like minicom may interfere with the session running the workflow. It is recommended to use Method 1 until Method 2 is fully debugged. -## Writing GitHub Actions YAML workflows -YAML workflows execute sccripts upon a push or pull request to a repository's branch. There is one GitHub Actions YAML workflow under [.github/workflows/regresstion-test.yaml](https://github.com/marianabuhazi/esp/blob/regression-flow/.github/workflows/regression-test.yaml) that -sets up the environment to run the tests (columbia-sld's ubuntu-small Docker image) and installs all the open-source linters. Then, it executes code formatting and testing scripts. - -## Run manually -The `regression` step in [.github/workflows/regresstion-test.yaml](https://github.com/marianabuhazi/esp/blob/regression-flow/.github/workflows/regression-test.yaml) includes the three steps that should be ran. First, run the 'Discover modified accelerators' step (./get_modified_accelerators.sh), then run HLS for all modified accelerators (./run_sims.sh). Lastly, generate the bitstream and program the FPGA (./run_esp-config.sh). 
- -### Installing open-source tools -#### vhdl-style-guide -``` -pip3 install vsg -export PATH="$PATH:/home/espuser/.local/bin/" -source ~/.bashrc -``` - -#### clang-format-10 -``` -sudo apt-get install clang-format-10 -``` - -#### autopep8 -``` -pip3 install autopep8 -``` - -#### verible -``` -wget https://github.com/chipsalliance/verible/releases/download/v0.0-3545-ge4028f19/verible-v0.0-3545-ge4028f19-linux-static-x86_64.tar.gz -tar -xvf verible-v0.0-3545-ge4028f19-linux-static-x86_64.tar.gz -rm verible-v0.0-3545-ge4028f19-linux-static-x86_64.tar.gz -mv verible-v0.0-3545-ge4028f19/ verible -export PATH=$PATH:/home/espuser/verible/bin -source ~/.bashrc -``` - -For more information on writing YAML workflows that are compatible with GitHub Actions, and documentation on these open-source linters check out the resources below. - -### Resources -- [Writing on pull_request workflows](https://docs.github.com/en/actions/using-workflows/workflow-syntax-for-github-actions) -- [vhdl-style-guide docs](https://vhdl-style-guide.readthedocs.io/en/latest/) -- [clang-format-10 docs](https://releases.llvm.org/10.0.0/tools/clang/docs/ClangFormat.html) -- [autopep8 docs](https://pypi.org/project/autopep8/) -- [verible repo + docs](https://github.com/chipsalliance/verible) +## About +This testing suite was developed in Spring 2025 at Columbia University, under the guidance of: +* Joseph Zuckerman (jzuck@cs.columbia.edu) +* Professor Luca Carloni (luca@cs.columbia.edu) -### Disclaimer -Surely, when these scripts and workflows get integrated into the ESP repository they will need to be modified to match the expectations and demands of the team. There is also lots of room for improvement and adding more advanced linting and testing features! Please contact Marian Abuhazi at ma4107@columbia.edu if you have more questions or want to brainstorm more! 
Happy to help 🤗 +For questions, suggestions, or support, contact: +Chia-Lin (Julie) Cheng – cc5210@columbia.edu, a report for this project can be provided for reference. \ No newline at end of file diff --git a/utils/scripts/actions-pipeline/esp_configs.json b/utils/scripts/actions-pipeline/esp_configs.json new file mode 100644 index 0000000000..808255221f --- /dev/null +++ b/utils/scripts/actions-pipeline/esp_configs.json @@ -0,0 +1,14 @@ +{ + "configs": [ + { + "config_name": "config1", + "fpga_name": "xilinx-vc707-xc7vx485t", + "config_path": "socs/defconfig/esp_xilinx-vc707-xc7vx485t_testing", + "result_logs_path": "utils/scripts/actions-pipeline/xilinx-vc707-xc7vx485t_logs", + "UART_IP": "espdev.cs.columbia.edu", + "UART_PORT": "", + "SSH_IP": "128.59.22.75", + "SSH_PORT": "" + } + ] +} \ No newline at end of file diff --git a/utils/scripts/actions-pipeline/get_hls_accelerators.sh b/utils/scripts/actions-pipeline/get_hls_accelerators.sh index 2e1d55b378..cbabd82e5a 100644 --- a/utils/scripts/actions-pipeline/get_hls_accelerators.sh +++ b/utils/scripts/actions-pipeline/get_hls_accelerators.sh @@ -32,6 +32,7 @@ for accelerator in "${accelerators[@]}"; do sizes+=("$size") done + # Sort by ASCII, rather than numeric values. 
TODO: sort -n sorted_sizes=($(printf "%s\n" "${sizes[@]}" | sort)) dma["$accelerator"]="${sorted_sizes[-1]#*_*_}" done diff --git a/utils/scripts/actions-pipeline/helper/execute_ssh_fft.sh b/utils/scripts/actions-pipeline/helper/execute_ssh_fft.sh new file mode 100755 index 0000000000..390caa6b1e --- /dev/null +++ b/utils/scripts/actions-pipeline/helper/execute_ssh_fft.sh @@ -0,0 +1,6 @@ +#!/bin/bash + +# helper script to call expect to ssh to fpga + +sleep 10 +./ssh_fft.exp $1 $2 $3 $4 \ No newline at end of file diff --git a/utils/scripts/actions-pipeline/helper/gen_bitstream.sh b/utils/scripts/actions-pipeline/helper/gen_bitstream.sh new file mode 100755 index 0000000000..c9e36e0ee9 --- /dev/null +++ b/utils/scripts/actions-pipeline/helper/gen_bitstream.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +## set print styles +NC='\033[0m' +BOLD='\033[1m' +EMOJI_CHECK="\xE2\x9C\x94" + +## set JSON file. a JSON file with config information +json_file="../esp_configs.json" +if [ ! -f "$json_file" ]; then + echo "Error: JSON file '$json_file' not found!" + exit 1 +fi +config_count=$(jq '.configs | length' "$json_file") +# check if there are any configs +if [ "$config_count" -eq 0 ]; then + echo "Error: No configurations found in the JSON file!" + exit 1 +fi +# read JSON content +config_index=0 # this workflow template works on testing 1 config. could be expanded. 
+config_name=$(jq -r ".configs[$config_index].config_name" "$json_file") +fpga_name=$(jq -r ".configs[$config_index].fpga_name" "$json_file") +config_path=$(jq -r ".configs[$config_index].config_path" "$json_file") +result_logs_path=$(jq -r ".configs[$config_index].result_logs_path" "$json_file") + +echo "Config Name: $config_name" +echo "FPGA Name: $fpga_name" +echo "Config Path: $config_path" +echo "Result Logs Path: $result_logs_path" + +## Env setup --------------------------------------- +source /opt/cad/scripts/tools_env.sh +## set paths +ESP_ROOT=$(realpath ../../../../) +## set log files +logs="$ESP_ROOT/$result_logs_path" +if [ -d "$logs" ]; then + rm -rf "$logs" + echo "Overwriting existing $logs" +else + echo "Creating new $logs" +fi + +mkdir -p "$logs/esp" + + + +workflow_result="$logs/workflow_result.log" +## set esp config file +testing_config="$ESP_ROOT/$config_path" +esp_config="$ESP_ROOT/socs/$fpga_name/socgen/esp/.esp_config" # actual path for esp. will be replaced by target testing config. 
+## SoC --------------------------------------- +cd "$ESP_ROOT/socs/$fpga_name" +## setup env +rm -rf vivado +rm -rf top.bit +make clean >/dev/null 2>&1 + +## prep files +echo -e "${BOLD}Configure ESP${NC}" +echo -e "Configure ESP" >> "$workflow_result" +cp "$testing_config" "$esp_config" +make esp-config > "$logs/esp/make_esp_config.log" 2>&1 + +echo -e "${BOLD}Running Logic Synthesis${NC}" +echo -e "Execute Logic Synthesis" >> "$workflow_result" +make vivado-syn > "$logs/esp/vivado_syn.log" 2>&1 # could be optimized +echo -e "${BOLD}Logic Synthesis done${NC}" +echo "Logic Synthesis done" >> "$workflow_result" + +if [ -s "top.bit" ]; then + echo -e "${BOLD}[PASS] Logic Synthesis Success${NC}" + echo "[PASS] Logic Synthesis Success" >> "$workflow_result" +else + echo -e "${BOLD}[FAIL] Logic Synthesis Fail${NC}" + echo "[FAIL] Logic Synthesis Fail" >> "$workflow_result" + exit 1 +fi diff --git a/utils/scripts/actions-pipeline/helper/gen_linux.sh b/utils/scripts/actions-pipeline/helper/gen_linux.sh new file mode 100755 index 0000000000..d0780a7e7f --- /dev/null +++ b/utils/scripts/actions-pipeline/helper/gen_linux.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +## set print styles +NC='\033[0m' +BOLD='\033[1m' +EMOJI_CHECK="\xE2\x9C\x94" + +## set JSON file. a JSON file with config information +json_file="../esp_configs.json" +if [ ! -f "$json_file" ]; then + echo "Error: JSON file '$json_file' not found!" + exit 1 +fi +config_count=$(jq '.configs | length' "$json_file") +# check if there are any configs +if [ "$config_count" -eq 0 ]; then + echo "Error: No configurations found in the JSON file!" + exit 1 +fi +# read JSON content +config_index=0 # this workflow template works on testing 1 config. could be expanded.
+config_name=$(jq -r ".configs[$config_index].config_name" "$json_file") +fpga_name=$(jq -r ".configs[$config_index].fpga_name" "$json_file") +config_path=$(jq -r ".configs[$config_index].config_path" "$json_file") +result_logs_path=$(jq -r ".configs[$config_index].result_logs_path" "$json_file") + +echo "Config Name: $config_name" +echo "FPGA Name: $fpga_name" +echo "Config Path: $config_path" +echo "Result Logs Path: $result_logs_path" + +## Env setup --------------------------------------- +source /opt/cad/scripts/tools_env.sh +## set paths +ESP_ROOT=$(realpath ../../../../) +## set log files +logs="$ESP_ROOT/$result_logs_path" +if [ ! -d "$logs" ]; then + mkdir -p "$logs" +fi +workflow_result="$logs/workflow_result.log" + +## Linux --------------------------------------- +## setup +cd "$ESP_ROOT/socs/$fpga_name" +make fft_stratus-hls +make linux-distclean +sleep 5 + +## prep files +make soft +make linux +make examples +make linux + +if [ -s "./soft-build/ariane/linux.bin" ]; then + echo -e "${BOLD}[PASS] 'make linux' pass${NC}" + echo "[PASS] 'make linux' pass" >> "$workflow_result" +else + echo -e "${BOLD}[FAIL] 'make linux' fail${NC}" + echo "[FAIL] 'make linux' fail" >> "$workflow_result" +fi \ No newline at end of file diff --git a/utils/scripts/actions-pipeline/helper/monitor_linux_boot.sh b/utils/scripts/actions-pipeline/helper/monitor_linux_boot.sh new file mode 100755 index 0000000000..c6392e66f2 --- /dev/null +++ b/utils/scripts/actions-pipeline/helper/monitor_linux_boot.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# helper script to monitor linux boot progress + +# Target +SUCCESS_PATTERN="Welcome to ESP" + +# Wait until the log file exists +# (minicom creates it after this monitor starts) +while [ ! -f "$1" ]; do + echo "Log file $1 does not exist yet. Waiting..." + sleep 10 +done + +# Monitor the log file for success pattern +while true; do + if grep -q "$SUCCESS_PATTERN" "$1"; then + echo "Boot completed successfully!"
+ killall -u $(whoami) minicom + sleep 3 + exit 0 + fi + + # Check for common failure patterns + if grep -q "Kernel panic" "$1"; then + echo "Boot failed: Kernel panic detected" + killall -u $(whoami) minicom + sleep 3 + exit 1 + fi + + sleep 3 +done \ No newline at end of file diff --git a/utils/scripts/actions-pipeline/helper/run_fpga_linux.sh b/utils/scripts/actions-pipeline/helper/run_fpga_linux.sh new file mode 100755 index 0000000000..3a530b44d7 --- /dev/null +++ b/utils/scripts/actions-pipeline/helper/run_fpga_linux.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +sleep 5 + +make fpga-run-linux \ No newline at end of file diff --git a/utils/scripts/actions-pipeline/helper/run_fpga_run.sh b/utils/scripts/actions-pipeline/helper/run_fpga_run.sh new file mode 100755 index 0000000000..dc104ebb18 --- /dev/null +++ b/utils/scripts/actions-pipeline/helper/run_fpga_run.sh @@ -0,0 +1,15 @@ +#!/bin/bash + +# helper script to execute baremetal hello, check result. + +sleep 10 +make fpga-run + +sleep 10 +if grep -q "Hello from ESP!" 
"$1"; then + echo "[PASS] Baremetal hello message found (helper)" >> "$2" +else + echo "[FAIL] Baremetal hello message not found (helper)" >> "$2" +fi + +killall -9 -u $(whoami) minicom \ No newline at end of file diff --git a/utils/scripts/actions-pipeline/helper/ssh_fft.exp b/utils/scripts/actions-pipeline/helper/ssh_fft.exp new file mode 100755 index 0000000000..9cc4b7452c --- /dev/null +++ b/utils/scripts/actions-pipeline/helper/ssh_fft.exp @@ -0,0 +1,119 @@ +#!/usr/bin/expect -f + +# original ssh command: ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p 5502 root@espgate.cs.columbia.edu +# Usage: ./ssh_fft.exp +# Usage: ./ssh_fft.exp 5502 root espgate.cs.columbia.edu openesp + +# Get command line arguments +if {$argc < 4} { + puts "Usage: $argv0 port username hostname password" + exit 1 +} +set port [lindex $argv 0] +set username [lindex $argv 1] +set hostname [lindex $argv 2] +set password [lindex $argv 3] + +# Set timeout +set timeout 30 + +# Start the SSH process with specific port +spawn ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -p $port $username@$hostname + +# Handle different possible login prompts +expect { + "yes/no" { + send "yes\r" + exp_continue + } + "$username@$hostname's password:" { + send "$password\r" + } + timeout { + puts "Connection timed out" + exit 1 + } +} + +# Check for successful login +expect { + -re "\\\$|#|>" { + puts "Successfully logged in to $hostname on port $port" + } + "Permission denied" { + puts "Login failed - incorrect username or password" + exit 1 + } + timeout { + puts "Login process timed out" + exit 1 + } +} + +# Change directory in SSH +set fftpath "../examples/multifft" +set fftfile "multifft.exe" + +send "if \[ -d \"$fftpath\" \]; then echo \"DIR_EXISTS\"; else echo \"DIR_NOT_FOUND\"; fi\r" +expect { + "DIR_EXISTS" { + puts "Directory exists, continue with changing directory to ../examples/multifft" + send "cd $fftpath\r" + } + "DIR_NOT_FOUND" { + # Directory doesn't exist, 
handle the error + puts "Directory not found - /examples/multifft" + exit 1 + } +} + +# Try execute FFT +send "if \[ -f \"$fftfile\" \]; then echo \"FILE_EXISTS\"; else echo \"FILE_NOT_FOUND\"; fi\r" +expect { + "FILE_EXISTS" { + puts "File found - multifft.exe" + } + "FILE_NOT_FOUND" { + puts "Error: File not found!" + expect -re "\\\$|#|>" + send "exit\r" + expect eof + exit 1 + } +} + +# Execute FFT file +expect -re "\\\$|#|>" +puts "Execute multifft.exe" +send "./$fftfile\r" + +# Set up a loop to handle the prompts of FFT + +# This expect command will loop automatically due to exp_continue +expect { + -re {\s*\*\* Press ENTER to START \*\*\s*} { + send "\r" + exp_continue + } + -re "\\\$|#|>" { + # Exit ssh (Takes time, like 10 sec, but yes it will exit.) + send "exit\r" + expect eof + exit 0 + } + eof { + # End of file reached - program has terminated + puts "Program has completed execution" + } + timeout { + # Timeout, force exit + puts "Timeout occurred" + } +} + +# Exit ssh (Takes time, like 10 sec, but yes it will exit.) 
+send "exit\r" +exit 0 + +# Now manually interactive with the remote system +# interact \ No newline at end of file diff --git a/utils/scripts/actions-pipeline/run_esp-config.sh b/utils/scripts/actions-pipeline/run_esp-config.sh index c7fe08e2eb..7fef1e041d 100644 --- a/utils/scripts/actions-pipeline/run_esp-config.sh +++ b/utils/scripts/actions-pipeline/run_esp-config.sh @@ -14,6 +14,8 @@ esp_config="$HOME/esp/socs/xilinx-vc707-xc7vx485t/socgen/esp/.esp_config" # FPGA run fpga_run="$HOME/esp/utils/scripts/actions-pipeline/./run_fpga_program.sh" +# FPGA run linux +fpga_run_linux="$HOME/esp/utils/scripts/actions-pipeline/./run_fpga_linux.sh" for accelerator in "${!dma[@]}"; do accelerator_upper=$(echo "$accelerator" | tr '[:lower:]' '[:upper:]') @@ -24,7 +26,9 @@ for accelerator in "${!dma[@]}"; do fpga_program="$logs/program/$accelerator.log" vivado_syn="$logs/hls/$accelerator.log" minicom="$logs/run/minicom_$accelerator.log" + minicom_linux="$logs/run/minicom_linux_$accelerator.log" run="$logs/run/run_$accelerator.log" + linux="$logs/soft/$accelerator.log" # Swap in the appropriate accelerator cp "$defconfig" "$esp_config" @@ -78,13 +82,46 @@ for accelerator in "${!dma[@]}"; do VIRTUAL_DEVICE=$(readlink ttyV0) # Run fpga program in the background - echo -e "${BOLD}WRITING RESULTS TO MINICOM...${NC}" + echo -e "${BOLD}WRITING BAREMETAL RESULTS TO MINICOM...${NC}" echo "" - $fpga_run > "$run" 2>&1 & - minicom -p "$VIRTUAL_DEVICE" -C "$minicom" 2>&1 + $fpga_run > "$run" 2>&1 & # make fpga-run and kill minicom. runs in background. + minicom -p "$VIRTUAL_DEVICE" -C "$minicom" 2>&1 # open and close the minicom. run in foreground. + + # End of work. Terminate the process. kill -9 "$socat_pid" else echo -e "${BOLD}BITSTREAM GENERATION FAILED...${NC}" echo -e " - $accelerator" fi + + # Software flow. Create Linux image and run Linux on FPGA. 
+    # TODO: Compile linux by $ make linux
+    echo ""
+    echo -e "${BOLD}STARTING COMPILE LINUX FOR ESP...${NC}"
+    echo -e "  ${EMOJI_CHECK} $accelerator"
+    make linux > "$linux" 2>&1
+
+    # TODO: Run fpga linux in background if make linux success
+    if [ -s "./soft-build/ariane/linux.bin" ]; then
+        # Open Minicom in the foreground
+        echo -e "${BOLD}OPENING MINICOM...${NC}"
+        echo ""
+        socat pty,link=ttyV0,waitslave,mode=777 tcp:goliah.cs.columbia.edu:4332 &
+        socat_pid=$!
+        sleep 2
+        VIRTUAL_DEVICE=$(readlink ttyV0)
+
+        # Run fpga linux
+        echo -e "${BOLD}WRITING LINUX RESULTS TO MINICOM...${NC}"
+        echo ""
+        $fpga_run_linux > "$run" 2>&1 & # make fpga-run-linux and kill minicom. runs in background.
+        minicom -p "$VIRTUAL_DEVICE" -C "$minicom_linux" 2>&1 # open and close the minicom. run in foreground.
+
+        # End of work. Terminate the process.
+        kill -9 "$socat_pid"
+    else
+        echo -e "${BOLD}LINUX GENERATION FAILED...${NC}"
+        echo -e "  - $accelerator"
+    fi
+
 done
\ No newline at end of file
diff --git a/utils/scripts/actions-pipeline/run_fpga_program.sh b/utils/scripts/actions-pipeline/run_fpga_program.sh
deleted file mode 100644
index c35db2e9dc..0000000000
--- a/utils/scripts/actions-pipeline/run_fpga_program.sh
+++ /dev/null
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-cd "$HOME/esp/socs/xilinx-vc707-xc7vx485t"
-sleep 10
-
-make fpga-run
-
-! killall -9 minicom
\ No newline at end of file
diff --git a/utils/scripts/actions-pipeline/run_workflow.sh b/utils/scripts/actions-pipeline/run_workflow.sh
new file mode 100755
index 0000000000..a4f07e6079
--- /dev/null
+++ b/utils/scripts/actions-pipeline/run_workflow.sh
@@ -0,0 +1,248 @@
+#!/bin/bash
+
+# set print styles
+NC='\033[0m'
+BOLD='\033[1m'
+EMOJI_CHECK="\xE2\x9C\x94"
+
+## set JSON file. a JSON file with config information
+json_file="esp_configs.json"
+if [ ! -f "$json_file" ]; then
+    echo "Error: JSON file '$json_file' not found!"
+ exit 1 +fi +config_count=$(jq '.configs | length' "$json_file") +# check if there are any configs +if [ "$config_count" -eq 0 ]; then + echo "Error: No configurations found in the JSON file!" + exit 1 +fi +# read JSON content +config_index=0 # this workflow template works on testing 1 config. could be expanded. +config_name=$(jq -r ".configs[$config_index].config_name" "$json_file") +fpga_name=$(jq -r ".configs[$config_index].fpga_name" "$json_file") +config_path=$(jq -r ".configs[$config_index].config_path" "$json_file") +result_logs_path=$(jq -r ".configs[$config_index].result_logs_path" "$json_file") +uart_ip=$(jq -r ".configs[$config_index].UART_IP" "$json_file") +uart_port=$(jq -r ".configs[$config_index].UART_PORT" "$json_file") +ssh_ip=$(jq -r ".configs[$config_index].SSH_IP" "$json_file") +ssh_port=$(jq -r ".configs[$config_index].SSH_PORT" "$json_file") + +echo "Config Name: $config_name" +echo "FPGA Name: $fpga_name" +echo "Config Path: $config_path" +echo "Result Logs Path: $result_logs_path" +echo "UART IP: $uart_ip" +echo "UART Port: $uart_port" +echo "SSH IP: $ssh_ip" +echo "SSH Port: $ssh_port" + +## Env setup --------------------------------------- +source /opt/cad/scripts/tools_env.sh # for unit testing script +## set paths +ESP_ROOT=$(realpath ../../../) + +## set log files +logs="$ESP_ROOT/$result_logs_path" +if [ ! -d "$logs" ]; then + mkdir -p "$logs" +else + # optional to remove existing log folder + # rm -rf "$logs" + echo "Directory $logs already exists." +fi + +if [ ! -d "$logs/esp" ]; then + mkdir -p "$logs/esp" +else + # optional to remove existing log folder + # rm -rf "$logs/esp" + echo "Directory $logs/esp already exists." 
+fi + +mkdir -p "$logs/fpga" # results happened on fpga +workflow_result="$logs/workflow_result.log" +fpga_program_log="$logs/esp/fpga_program.log" +fpga_run_log="$logs/esp/fpga_run.log" +fpga_run_linux_log="$logs/esp/fpga_run_linux.log" +boot_linux_log="$logs/fpga/boot_linux.log" +minicom_log="$logs/fpga/minicom_baremetal.log" +minicom_boot_linux_log="$logs/fpga/minicom_boot_linux.log" +ssh_fft_log="$logs/fpga/ssh_fft.log" + +## set helper scripts +gen_bit="$ESP_ROOT/utils/scripts/actions-pipeline/helper/gen_bitstream.sh" +gen_linux="$ESP_ROOT/utils/scripts/actions-pipeline/helper/gen_linux.sh" +fpga_run="$ESP_ROOT/utils/scripts/actions-pipeline/helper/run_fpga_run.sh $minicom_log $workflow_result" +fpga_run_linux="$ESP_ROOT/utils/scripts/actions-pipeline/helper/run_fpga_linux.sh" +monitor="$ESP_ROOT/utils/scripts/actions-pipeline/helper/monitor_linux_boot.sh $minicom_boot_linux_log" +exe_ssh_fft="$ESP_ROOT/utils/scripts/actions-pipeline/helper/execute_ssh_fft.sh $ssh_port root $ssh_ip openesp" +## set set esp config file +testing_config="$ESP_ROOT/$config_path" +esp_config="$ESP_ROOT/socs/$fpga_name/socgen/esp/.esp_config" # actual path for esp. will be replaced by target testing config +cp "$testing_config" "$logs/esp" # copy a testing config into log folder for reference + +## SoC --------------------------------------- +cd "$ESP_ROOT/socs/$fpga_name" + +# ## config and HLS. (optional to run together with this workflow) +# $gen_bit +# gen_bit_pid=$! +# wait $gen_bit +# EXIT_CODE=$? +# if [ $EXIT_CODE -eq 1 ]; then +# echo -e "${BOLD}[FAIL] Generate Bitstream failed${NC}" +# echo "[FAIL] Generate Bitstream failed" >> "$workflow_result" +# exit 1 +# fi + +## run on fpga +if [ -s "top.bit" ]; then + echo -e "${BOLD}Bitstream is found${NC}" + echo "Bitstream is found" >> "$workflow_result" + + # make fpga-program + cd "$ESP_ROOT/socs/$fpga_name" + echo -e "${BOLD}..... Try to program FPGA${NC}" + echo "..... 
Try to program FPGA" >> "$workflow_result"
+    make fpga-program > "$fpga_program_log" 2>&1
+    if grep -q ERROR "$fpga_program_log"; then
+        echo -e "${BOLD}[FAIL] 'make fpga-program' failed${NC}"
+        echo "[FAIL] 'make fpga-program' failed" >> "$workflow_result"
+        exit 1
+    else
+        echo -e "${BOLD}[PASS] 'make fpga-program' pass${NC}"
+        echo "[PASS] 'make fpga-program' pass" >> "$workflow_result"
+    fi
+
+    # open minicom session
+    killall -9 -u $(whoami) minicom # make sure no running minicom
+    echo -e "${BOLD}..... Try to open minicom${NC}"
+    echo "..... Try to open minicom" >> "$workflow_result"
+    socat pty,link=ttyV0,waitslave,mode=777 tcp:$uart_ip:$uart_port &
+    socat_pid=$!
+    sleep 2
+    VIRTUAL_DEVICE=$(readlink ttyV0)
+
+    # make fpga-run in background
+    echo -e "${BOLD}..... Writing baremetal to minicom${NC}"
+    echo -e "..... Writing baremetal to minicom" >> "$workflow_result"
+    cd "$ESP_ROOT/socs/$fpga_name"
+    $fpga_run > "$fpga_run_log" 2>&1 &
+    fpgarun_pid=$!
+
+    # open minicom in foreground
+    minicom -p "$VIRTUAL_DEVICE" -C "$minicom_log" 2>&1
+    # make fpga-run script will kill minicom
+
+    # check "hello" message
+    wait $fpgarun_pid
+    EXIT_CODE=$?
+    if [ $EXIT_CODE -eq 1 ]; then
+        echo -e "${BOLD}[FAIL] 'make fpga-run' failed${NC}"
+        echo "[FAIL] 'make fpga-run' failed" >> "$workflow_result"
+        # clean minicom
+        rm -rf ttyV0
+        kill -9 "$socat_pid"
+        killall -9 -u $(whoami) minicom
+        exit 1
+    else
+        echo -e "${BOLD}[PASS] 'make fpga-run' pass${NC}"
+        echo "[PASS] 'make fpga-run' pass" >> "$workflow_result"
+
+        if grep -q "Hello from ESP!"
"$minicom_log"; then
+            echo -e "${BOLD}  -- Baremetal hello message found${NC}"
+            echo "  -- Baremetal hello message found" >> "$workflow_result"
+        else
+            echo -e "${BOLD}[FAIL] Baremetal hello message not found${NC}"
+            echo "[FAIL] Baremetal hello message not found" >> "$workflow_result"
+        fi
+    fi
+else
+    echo -e "${BOLD}[FAIL] Bitstream not found${NC}"
+    echo "[FAIL] Bitstream not found" >> "$workflow_result"
+    exit 1
+fi
+# clean minicom
+rm -rf ttyV0
+kill -9 "$socat_pid"
+killall -9 -u $(whoami) minicom
+
+## Linux ---------------------------------------
+# ## Generate Linux Image. (optional to run together with this workflow)
+# $gen_linux
+# gen_linux_pid=$!
+# wait $gen_linux
+# EXIT_CODE=$?
+# if [ $EXIT_CODE -eq 1 ]; then
+#     echo -e "${BOLD}[FAIL] Generate Linux failed${NC}"
+#     echo "[FAIL] Generate Linux failed" >> "$workflow_result"
+#     exit 1
+# fi
+
+cd "$ESP_ROOT/socs/$fpga_name"
+## run on fpga
+if [ -s "./soft-build/ariane/linux.bin" ]; then
+    echo -e "${BOLD}[PASS] Linux image is found${NC}"
+    echo "[PASS] Linux image is found" >> "$workflow_result"
+
+    # open minicom session
+    killall -9 -u $(whoami) minicom # make sure no running minicom
+    echo -e "${BOLD}..... Try to open minicom${NC}"
+    echo "..... Try to open minicom" >> "$workflow_result"
+    socat pty,link=ttyV0,waitslave,mode=777 tcp:$uart_ip:$uart_port &
+    socat_pid=$!
+    sleep 2
+    VIRTUAL_DEVICE=$(readlink ttyV0)
+
+    # make fpga-run-linux in background
+    echo -e "${BOLD}..... Try to boot linux${NC}"
+    echo "..... Try to boot linux" >> "$workflow_result"
+    cd "$ESP_ROOT/socs/$fpga_name"
+    $fpga_run_linux > "$fpga_run_linux_log" 2>&1 &
+
+    # call helper to monitor linux boot progress
+    $monitor > "$boot_linux_log" 2>&1 &
+    monitor_pid=$!
+
+    # open minicom in foreground
+    minicom -p "$VIRTUAL_DEVICE" -C "$minicom_boot_linux_log" 2>&1
+    # monitor kills minicom if boot successfully
+
+    # print monitor status
+    wait $monitor_pid
+    EXIT_CODE=$?
+    if [ $EXIT_CODE -eq 0 ]; then
+        echo -e "${BOLD}[PASS] Linux boot pass${NC}"
+        echo "[PASS] Linux boot pass" >> "$workflow_result"
+
+        ## Application ---------------------------------------
+        # execute ssh and fft
+        cd "$ESP_ROOT/utils/scripts/actions-pipeline/helper"
+        echo -e "${BOLD}..... Try ssh and run fft${NC}"
+        echo "..... Try ssh and run fft" >> "$workflow_result"
+        $exe_ssh_fft > "$ssh_fft_log" 2>&1
+        echo -e "${BOLD}..... End of ssh and run fft${NC}"
+        echo "..... End of ssh and run fft" >> "$workflow_result"
+
+        # redirect overall fft result into main workflow result file
+        grep "FFT OVERALL TEST RESULT" "$ssh_fft_log" >> "$workflow_result"
+    else
+        echo -e "${BOLD}[FAIL] Linux boot fail${NC}"
+        echo "[FAIL] Linux boot fail" >> "$workflow_result"
+        # clean minicom
+        rm -rf ttyV0
+        kill -9 "$socat_pid"
+        killall -9 -u $(whoami) minicom
+        exit 1
+    fi
+
+else
+    echo -e "${BOLD}[FAIL] Linux image not found${NC}"
+    echo "[FAIL] Linux image not found" >> "$workflow_result"
+    exit 1
+fi
+# clean minicom
+rm -rf ttyV0
+kill -9 "$socat_pid"
+killall -9 -u $(whoami) minicom
\ No newline at end of file
diff --git a/utils/scripts/submodule_init.sh b/utils/scripts/submodule_init.sh
index 57540dcad8..5aa8bfa0c1 100755
--- a/utils/scripts/submodule_init.sh
+++ b/utils/scripts/submodule_init.sh
@@ -16,66 +16,68 @@ noyes () {
     done
 }
-INSTALL_ARIANE=0
+# hardcoded flags for regression testing on Ariane
+INSTALL_ARIANE=1
 INSTALL_IBEX=0
-INSTALL_ARIANE_LINUX=0
+INSTALL_ARIANE_LINUX=1
 INSTALL_LEON3_LINUX=0
-INSTALL_STRATUS_INC=0
+INSTALL_STRATUS_INC=1
 INSTALL_MATCHLIB=0
 INSTALL_CHISEL=0
-INSTALL_NVDLA=0
+INSTALL_NVDLA=1
 INSTALL_SPANDEX=0
 INSTALL_BASEJUMP=0
 INSTALL_ZYNQ=0
 INSTALL_EIGEN=0
-if [ $(noyes "*** QUESTION : Do you want to install the Ariane core?") == "y" ]; then
-    INSTALL_ARIANE=1
-fi
+# disable interactive installation for regression testing on Ariane
+# if [ $(noyes "*** QUESTION : Do you want to install the Ariane core?") == "y" ]; then
+#     INSTALL_ARIANE=1
+#
fi -if [ $(noyes "*** QUESTION : Do you want to install the Ibex core?") == "y" ]; then - INSTALL_IBEX=1 -fi +# if [ $(noyes "*** QUESTION : Do you want to install the Ibex core?") == "y" ]; then +# INSTALL_IBEX=1 +# fi -if [ $(noyes "*** QUESTION : Do you want to install Linux for the Ariane core?") == "y" ]; then - INSTALL_ARIANE_LINUX=1 -fi +# if [ $(noyes "*** QUESTION : Do you want to install Linux for the Ariane core?") == "y" ]; then +# INSTALL_ARIANE_LINUX=1 +# fi -if [ $(noyes "*** QUESTION : Do you want to install Linux for the Leon3 core?") == "y" ]; then - INSTALL_LEON3_LINUX=1 -fi +# if [ $(noyes "*** QUESTION : Do you want to install Linux for the Leon3 core?") == "y" ]; then +# INSTALL_LEON3_LINUX=1 +# fi -if [ $(noyes "*** QUESTION : Do you want to install support for accelerator design with Stratus HLS?") == "y" ]; then - INSTALL_STRATUS_INC=1 -fi +# if [ $(noyes "*** QUESTION : Do you want to install support for accelerator design with Stratus HLS?") == "y" ]; then +# INSTALL_STRATUS_INC=1 +# fi -if [ $(noyes "*** QUESTION : Do you want to install support for accelerator design with Matchlib in Catapult HLS?") == "y" ]; then - INSTALL_MATCHLIB=1 -fi +# if [ $(noyes "*** QUESTION : Do you want to install support for accelerator design with Matchlib in Catapult HLS?") == "y" ]; then +# INSTALL_MATCHLIB=1 +# fi -if [ $(noyes "*** QUESTION : Do you want to install support for accelerator design with Chisel?") == "y" ]; then - INSTALL_CHISEL=1 -fi +# if [ $(noyes "*** QUESTION : Do you want to install support for accelerator design with Chisel?") == "y" ]; then +# INSTALL_CHISEL=1 +# fi -if [ $(noyes "*** QUESTION : Do you want to install NVDLA?") == "y" ]; then - INSTALL_NVDLA=1 -fi +# if [ $(noyes "*** QUESTION : Do you want to install NVDLA?") == "y" ]; then +# INSTALL_NVDLA=1 +# fi -if [ $(noyes "*** QUESTION : Do you want to install Spandex caches?") == "y" ]; then - INSTALL_SPANDEX=1 -fi +# if [ $(noyes "*** QUESTION : Do you want to install 
Spandex caches?") == "y" ]; then +# INSTALL_SPANDEX=1 +# fi -if [ $(noyes "*** QUESTION : Do you want to install Basejump to simulate designs with a DDR controller?") == "y" ]; then - INSTALL_BASEJUMP=1 -fi +# if [ $(noyes "*** QUESTION : Do you want to install Basejump to simulate designs with a DDR controller?") == "y" ]; then +# INSTALL_BASEJUMP=1 +# fi -if [ $(noyes "*** QUESTION : Do you want to install support for Zynq boards?") == "y" ]; then - INSTALL_ZYNQ=1 -fi +# if [ $(noyes "*** QUESTION : Do you want to install support for Zynq boards?") == "y" ]; then +# INSTALL_ZYNQ=1 +# fi -if [ $(noyes "*** QUESTION : Do you want to install Eigen?") == "y" ]; then - INSTALL_EIGEN=1 -fi +# if [ $(noyes "*** QUESTION : Do you want to install Eigen?") == "y" ]; then +# INSTALL_EIGEN=1 +# fi echo "*** Installing Required and Selected Submodules ***" git submodule update --init --recursive rtl/caches/esp-caches diff --git a/utils/toolchain/build_riscv_toolchain.sh b/utils/toolchain/build_riscv_toolchain.sh index a0fa52eb33..885996bf1c 100755 --- a/utils/toolchain/build_riscv_toolchain.sh +++ b/utils/toolchain/build_riscv_toolchain.sh @@ -18,7 +18,7 @@ BUILDROOT_SHA_PYTHON=fbff7d7289cc95db991184f890f4ca1fcf8a101e # A patch for buildroot RISCV64 with numpy enabled BUILDROOT_PATCH=${ESP_ROOT}/utils/toolchain/python-patches/python-numpy.patch -DEFAULT_TARGET_DIR="/home/${USER}/riscv" +DEFAULT_TARGET_DIR="/home/esp2025/${USER}/riscv_2" TMP=${ESP_ROOT}/_riscv_build # Helper functions