Skip to content

Commit

Permalink
Optimize benchmark workflow.
Browse files Browse the repository at this point in the history
  • Loading branch information
xiaofei0800 committed May 13, 2024
1 parent fa893cb commit 95e0877
Show file tree
Hide file tree
Showing 5 changed files with 120 additions and 94 deletions.
44 changes: 26 additions & 18 deletions .github/workflows/plot-benchmark.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,13 @@
# Benchmark scenarios
SCENARIO = ["long", "short"]

# QUIC implements.
# QUIC implementations.
# The first element is used as the normalization base.
IMPLS = ["lsquic", "tquic"]

# Number of benchmark rounds in each scenario.
ROUND = 5

# File sizes in long connection scenario benchmark.
LONG_FILE_SIZES = ["15K", "50K", "2M"]

Expand All @@ -38,9 +41,9 @@
DAYS = 90

# Read data from benchmark result file.
def read_data(data_dir, scen, impl, size, conn, stream, date):
dirname = "benchmark_%s_%s_%s_%d_%d.%s" % (scen, impl, size, conn, stream, date)
filename = "benchmark_%s_%s_%s_%d_%d.%s" % (scen, impl, size, conn, stream, date)
def read_data(data_dir, scen, impl, size, conn, stream, round, date):
dirname = "benchmark_%s_%s_%d_%d.%s" % (scen, size, conn, stream, date)
filename = "benchmark_%s_%s_%s_%d_%d.%d.%s" % (scen, impl, size, conn, stream, round, date)
path = os.path.join(data_dir, dirname, filename)
try:
with open(path) as f:
Expand All @@ -49,10 +52,10 @@ def read_data(data_dir, scen, impl, size, conn, stream, date):
except:
return 0.0

# Put benchmark result in array according to implement.
# Load benchmark results into array.
def prepare_data(data_dir):
titles = [' ' for _ in range((len(LONG_FILE_SIZES)*len(LONG_CONNS)*len(LONG_STREAMS) + len(SHORT_FILE_SIZES)*len(SHORT_CONNS)*len(SHORT_STREAMS)))]
result = [[[0.0 for _ in range(len(LONG_FILE_SIZES)*len(LONG_CONNS)*len(LONG_STREAMS) + len(SHORT_FILE_SIZES)*len(SHORT_CONNS)*len(SHORT_STREAMS))] for _ in range(len(IMPLS))] for _ in range(DAYS)]
result = [[[[0.0 for _ in range(len(LONG_FILE_SIZES)*len(LONG_CONNS)*len(LONG_STREAMS) + len(SHORT_FILE_SIZES)*len(SHORT_CONNS)*len(SHORT_STREAMS))] for _ in range(len(IMPLS))] for _ in range(DAYS)] for _ in range(ROUND) ]

# Load long connection scenario result.
I = len(LONG_FILE_SIZES)
Expand All @@ -66,8 +69,9 @@ def prepare_data(data_dir):
titles[i*J*K+j*K+k] = "long %s %d %d" % (LONG_FILE_SIZES[i], LONG_CONNS[j], LONG_STREAMS[k])
for n in range(N):
for d in range(D):
date = (datetime.datetime.now() - datetime.timedelta(days=d)).strftime('%Y-%m-%d')
result[D-1-d][n][i*J*K+j*K+k] = read_data(data_dir, "long", IMPLS[n], LONG_FILE_SIZES[i], LONG_CONNS[j], LONG_STREAMS[k], date)
for r in range(ROUND):
date = (datetime.datetime.now() - datetime.timedelta(days=d)).strftime('%Y-%m-%d')
result[r][D-1-d][n][i*J*K+j*K+k] = read_data(data_dir, "long", IMPLS[n], LONG_FILE_SIZES[i], LONG_CONNS[j], LONG_STREAMS[k], r, date)

# Load short connection scenario result.
M = len(LONG_FILE_SIZES)*len(LONG_CONNS)*len(LONG_STREAMS)
Expand All @@ -82,19 +86,23 @@ def prepare_data(data_dir):
titles[M+i*J*K+j*K+k] = "short %s %d %d" % (SHORT_FILE_SIZES[i], SHORT_CONNS[j], SHORT_STREAMS[k])
for n in range(N):
for d in range(D):
date = (datetime.datetime.now() - datetime.timedelta(days=d)).strftime('%Y-%m-%d')
result[D-1-d][n][M+i*J*K+j*K+k] = read_data(data_dir, "short", IMPLS[n], SHORT_FILE_SIZES[i], SHORT_CONNS[j], LONG_STREAMS[k], date)
for r in range(ROUND):
date = (datetime.datetime.now() - datetime.timedelta(days=d)).strftime('%Y-%m-%d')
result[r][D-1-d][n][M+i*J*K+j*K+k] = read_data(data_dir, "short", IMPLS[n], SHORT_FILE_SIZES[i], SHORT_CONNS[j], LONG_STREAMS[k], r, date)

# Average the results across rounds.
result_avg = np.mean(np.array(result), axis=0).tolist()

# Normalize benchmark result.
for d in range(D):
base = result[d][0]
for i in range(1, len(result[d])):
result[d][i] = [round(x/y, 4) if y != 0 else 0 for x, y in zip(result[d][i], base)]
for i in range(len(result[d][0])):
if result[d][0][i] != 0:
result[d][0][i] = 1

return titles, result
base = result_avg[d][0]
for i in range(1, len(result_avg[d])):
result_avg[d][i] = [round(x/y, 4) if y != 0 else 0 for x, y in zip(result_avg[d][i], base)]
for i in range(len(result_avg[d][0])):
if result_avg[d][0][i] != 0:
result_avg[d][0][i] = 1

return titles, result_avg

# Print benchmark performance result to stdout.
def show(titles, result):
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/plot-fct.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
import matplotlib.pyplot as plt
import numpy as np

# QUIC implements
# QUIC implementations
IMPLS = ["tquic", "gquiche", "lsquic", "picoquic", "quiche"]

# Different modes
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/plot-goodput.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
import matplotlib.pyplot as plt
import numpy as np

# QUIC implements
# QUIC implementations
IMPLS = ["tquic", "gquiche", "lsquic", "picoquic", "quiche"]

# Different file sizes
Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/plot-interop.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
import numpy as np
from matplotlib.colors import ListedColormap

# QUIC implements
# QUIC implementations
CLIENT_IMPLS = ["tquic", "lsquic", "quiche", "picoquic", "ngtcp2", "msquic",
"s2n-quic", "quinn", "neqo", "kwik", "aioquic", "chrome",
"go-x-net", "quic-go", "mvfst"]
Expand Down
164 changes: 91 additions & 73 deletions .github/workflows/tquic-benchmark.yml
Original file line number Diff line number Diff line change
Expand Up @@ -14,13 +14,31 @@ jobs:
runs-on: ubuntu-latest
outputs:
benchmark_date: ${{ steps.set-benchmark-date.outputs.benchmark_date }}
benchmark_impls: ${{ steps.set-implements.outputs.benchmark_impls }}
benchmark_rounds: ${{ steps.set-rounds.outputs.benchmark_rounds }}
benchmark_duration: ${{ steps.set-duration.outputs.benchmark_duration }}
steps:
- name: Set date
id: set-benchmark-date
run: |
BENCHMARK_DATE=$(date -u +"%Y-%m-%d")
echo $BENCHMARK_DATE
echo "benchmark_date=$BENCHMARK_DATE" >> $GITHUB_OUTPUT
- name: Set implements
id: set-implements
run: |
IMPLS="lsquic tquic"
echo "benchmark_impls=$IMPLS" >> $GITHUB_OUTPUT
- name: Set rounds
id: set-rounds
run: |
ROUNDS=2
echo "benchmark_rounds=$ROUNDS" >> $GITHUB_OUTPUT
- name: Set benchmark duration
id: set-duration
run: |
DURATION=10
echo "benchmark_duration=$DURATION" >> $GITHUB_OUTPUT
build_tquic:
name: Build tquic
Expand All @@ -38,7 +56,7 @@ jobs:
cp target/release/tquic_client tquic_client
- name: Build start script
run: |
echo $'#!/bin/bash\nchmod u+x ./tquic_server\n./tquic_server --send-udp-payload-size 1350 --log-level OFF --root ./ --disable-stateless-reset -l 0.0.0.0:4433 -c ./cert.crt -k ./cert.key &' > start_tquic.sh
echo $'#!/bin/bash\ncd "$(dirname "$0")"\nchmod u+x ./tquic_server\n./tquic_server --send-udp-payload-size 1350 --log-level OFF --root ../files --disable-stateless-reset -l 0.0.0.0:4433 -c ../cert/cert.crt -k ../cert/cert.key &' > start_tquic.sh
chmod u+x start_tquic.sh
- name: Upload tquic_server
uses: actions/upload-artifact@v4
Expand Down Expand Up @@ -76,7 +94,7 @@ jobs:
cp bin/http_server ../lsquic_server
- name: Build start script
run: |
echo $'#!/bin/bash\nchmod u+x ./lsquic_server\n./lsquic_server -c tquic_benchmark,./cert.crt,./cert.key -s 0.0.0.0:4433 -r ./ -L crit > lsquic.log 2>&1 &' > start_lsquic.sh
echo $'#!/bin/bash\ncd "$(dirname "$0")"\nchmod u+x ./lsquic_server\n./lsquic_server -c tquic_benchmark,../cert/cert.crt,../cert/cert.key -s 0.0.0.0:4433 -r ../files -L crit > lsquic.log 2>&1 &' > start_lsquic.sh
chmod u+x start_lsquic.sh
- name: Upload lsquic server
uses: actions/upload-artifact@v4
Expand Down Expand Up @@ -122,118 +140,118 @@ jobs:
runs-on: ubuntu-latest
strategy:
matrix:
server: [ tquic, lsquic ]
file: [ 15K, 50K, 2M ]
conn: [ 10 ]
stream: [ 1, 10 ]
steps:
- name: Download ${{ matrix.server }} server
uses: actions/download-artifact@v4
with:
name: ${{ matrix.server }}_server_bin
- name: Download cert
uses: actions/download-artifact@v4
with:
name: cert
- name: Download files
uses: actions/download-artifact@v4
with:
name: files
- name: Download tquic_client
- name: Download all
uses: actions/download-artifact@v4
with:
name: tquic_client_bin
- name: Display structure of downloaded files
run: ls -R
- name: Start ${{ matrix.server }} server
run: |
sh start_${{ matrix.server }}.sh
pgrep ${{ matrix.server }}_server
- name: Benchmark ${{ matrix.server }}
run: |
chmod u+x ./tquic_client
./tquic_client https://tquic_benchmark:4433/file_${{ matrix.file }} --connect-to 127.0.0.1:4433 --threads ${{ matrix.conn }} --max-concurrent-conns 1 --max-concurrent-requests ${{ matrix.stream }} --max-requests-per-conn 0 --total-requests-per-thread 0 -d 600 --disable-stateless-reset --send-batch-size 1 --recv-udp-payload-size 1350 --send-udp-payload-size 1350 --log-level OFF > client.log 2>&1
cat client.log | grep "finished in" | awk '{print $4}' > benchmark_long_${{ matrix.server }}_${{ matrix.file }}_${{ matrix.conn }}_${{ matrix.stream }}.${{ needs.config.outputs.benchmark_date }}
- name: Stop ${{ matrix.server }} server
- name: Benchmark
run: |
killall ${{ matrix.server }}_server
sleep 1
chmod u+x ./tquic_client_bin/tquic_client
for((round=0;round<${{ needs.config.outputs.benchmark_rounds }};round++));do
for impl in ${{ needs.config.outputs.benchmark_impls }};do
sh ${impl}_server_bin/start_${impl}.sh
pgrep ${impl}_server
sleep 1
./tquic_client_bin/tquic_client https://tquic_benchmark:4433/file_${{ matrix.file }} --connect-to 127.0.0.1:4433 --threads ${{ matrix.conn }} --max-concurrent-conns 1 --max-concurrent-requests ${{ matrix.stream }} --max-requests-per-conn 0 --total-requests-per-thread 0 -d ${{ needs.config.outputs.benchmark_duration }} --disable-stateless-reset --send-batch-size 1 --recv-udp-payload-size 1350 --send-udp-payload-size 1350 --log-level OFF > client.log 2>&1
cat client.log | grep "finished in" | awk '{print $4}' > benchmark_long_${impl}_${{ matrix.file }}_${{ matrix.conn }}_${{ matrix.stream }}.${round}.${{ needs.config.outputs.benchmark_date }}
killall ${impl}_server
sleep 1
done
done
- name: Upload benchmark result
uses: actions/upload-artifact@v4
with:
name: benchmark_long_${{ matrix.server }}_${{ matrix.file }}_${{ matrix.conn }}_${{ matrix.stream }}.${{ needs.config.outputs.benchmark_date }}
name: benchmark_long_${{ matrix.file }}_${{ matrix.conn }}_${{ matrix.stream }}.${{ needs.config.outputs.benchmark_date }}
path: benchmark_long_*
retention-days: 90

run_short_conn:
name: Run short connection scenario benchmark
needs: [ config, build_tquic, build_lsquic, gen_cert, gen_files ]
runs-on: ubuntu-latest
strategy:
matrix:
server: [ tquic, lsquic ]
steps:
- name: Download ${{ matrix.server }} server
uses: actions/download-artifact@v4
with:
name: ${{ matrix.server }}_server_bin
- name: Download cert
uses: actions/download-artifact@v4
with:
name: cert
- name: Download files
uses: actions/download-artifact@v4
with:
name: files
- name: Download tquic_client
- name: Download all
uses: actions/download-artifact@v4
with:
name: tquic_client_bin
- name: Display structure of downloaded files
run: ls -R
- name: Start ${{ matrix.server }} server
run: |
sh start_${{ matrix.server }}.sh
pgrep ${{ matrix.server }}_server
- name: Benchmark ${{ matrix.server }}
- name: Benchmark
run: |
chmod u+x ./tquic_client
./tquic_client https://tquic_benchmark:4433/file_1K --connect-to 127.0.0.1:4433 --threads 10 --max-concurrent-conns 1 --max-concurrent-requests 1 --max-requests-per-conn 1 --total-requests-per-thread 0 -d 600 --disable-stateless-reset --send-batch-size 1 --recv-udp-payload-size 1350 --send-udp-payload-size 1350 --log-level OFF > client.log 2>&1
cat client.log | grep "finished in" | awk '{print $4}' > benchmark_short_${{ matrix.server }}_1K_10_1.${{ needs.config.outputs.benchmark_date }}
- name: Stop ${{ matrix.server }} server
run: |
killall ${{ matrix.server }}_server
sleep 1
chmod u+x ./tquic_client_bin/tquic_client
for((round=0;round<${{ needs.config.outputs.benchmark_rounds }};round++));do
for impl in ${{ needs.config.outputs.benchmark_impls }};do
sh ${impl}_server_bin/start_${impl}.sh
pgrep ${impl}_server
sleep 1
./tquic_client_bin/tquic_client https://tquic_benchmark:4433/file_1K --connect-to 127.0.0.1:4433 --threads 10 --max-concurrent-conns 1 --max-concurrent-requests 1 --max-requests-per-conn 1 --total-requests-per-thread 0 -d ${{ needs.config.outputs.benchmark_duration }} --disable-stateless-reset --send-batch-size 1 --recv-udp-payload-size 1350 --send-udp-payload-size 1350 --log-level OFF > client.log 2>&1
cat client.log | grep "finished in" | awk '{print $4}' > benchmark_short_${impl}_1K_10_1.${round}.${{ needs.config.outputs.benchmark_date }}
killall ${impl}_server
sleep 1
done
done
- name: Upload benchmark result
uses: actions/upload-artifact@v4
with:
name: benchmark_short_${{ matrix.server }}_1K_10_1.${{ needs.config.outputs.benchmark_date }}
name: benchmark_short_1K_10_1.${{ needs.config.outputs.benchmark_date }}
path: benchmark_short_*
retention-days: 90

result:
runs-on: ubuntu-latest
needs: [ run_long_conn, run_short_conn ]
steps:
- name: Download all benchmark results
uses: actions/download-artifact@v4

- name: Display structure of downloaded files
run: ls -R

- name: Download plot tools
uses: actions/checkout@v4
- name: Download all
uses: actions/download-artifact@v4
with:
path: tools

path: benchmark_result
- name: Download latest benchmark history
working-directory: ./benchmark_result
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
SCENARIO=("long" "short")
LONG_FILE_SIZES=("15K" "50K" "2M")
SHORT_FILE_SIZES=("1K")
LONG_CONNS=10
SHORT_CONNS=10
LONG_STREAMS=(1 10)
SHORT_STREAMS=(1)
DAYS=90
for ((i=1; i<$DAYS; i++)); do
date=$(date -d "-$i day" +%Y-%m-%d)
download_cmd="gh run download"
for scen in "${SCENARIO[@]}"; do
if [ "$scen" == "long" ]; then
FILE_SIZES=("${LONG_FILE_SIZES[@]}")
CONNS=$LONG_CONNS
STREAMS=("${LONG_STREAMS[@]}")
else
FILE_SIZES=("${SHORT_FILE_SIZES[@]}")
CONNS=$SHORT_CONNS
STREAMS=("${SHORT_STREAMS[@]}")
fi
for size in "${FILE_SIZES[@]}"; do
for stream in "${STREAMS[@]}"; do
download_cmd+=" -n benchmark_${scen}_${size}_${CONNS}_${stream}.${date}"
done
done
done
echo "$download_cmd"
eval "$download_cmd" || echo ""
done
- name: Display structure of downloaded files
run: ls -R benchmark_result
- name: Install dependencies
run: |
sudo apt install python3-matplotlib
pip3 install prettytable termcolor
- name: Plot and print all benchmark results
run: python3 tools/.github/workflows/plot-benchmark.py .

run: python3 .github/workflows/plot-benchmark.py ./benchmark_result
- name: Store all benchmark results
uses: actions/upload-artifact@v4
with:
Expand Down

0 comments on commit 95e0877

Please sign in to comment.