Raw MLIR bench
adam-smnk committed Jul 29, 2024
1 parent f1abac5 commit 386b668
Showing 2 changed files with 99 additions and 1 deletion.
3 changes: 2 additions & 1 deletion tools/mlir_bench/ov_model_gen.py
@@ -7,6 +7,7 @@
import argparse
import string
import sys
+import os

import torch
import torch.nn as nn
@@ -243,4 +244,4 @@ def main():


if __name__ == '__main__':
-    sys.exit(main())
+    os._exit(main())
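For reference: sys.exit(main()) raises SystemExit and lets the interpreter shut down normally (running atexit handlers and flushing buffers), while os._exit() terminates the process immediately and skips that cleanup. A minimal sketch of the difference, using an illustrative --hard-exit flag:

import atexit
import os
import sys

atexit.register(lambda: print("cleanup ran"))

if "--hard-exit" in sys.argv:
    os._exit(0)  # exits immediately; the atexit handler never runs
else:
    sys.exit(0)  # raises SystemExit; "cleanup ran" is printed during shutdown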
97 changes: 97 additions & 0 deletions tools/mlir_bench/ov_raw_mlir_bench.sh
@@ -0,0 +1,97 @@
#!/bin/bash
#
# Runs the pure MLIR part of the MLP benchmarks using TPP-MLIR.
# This approach assumes that only one MLIR op is generated,
# e.g., when the whole graph is outlined to MLIR.

die_syntax() {
  echo "Syntax: $0 [-t (f32|f16|bf16|...)] [-b (mlp)] [-D]"
  echo ""
  echo " -t: Optional data type"
  echo " -b: Optional baseline model"
  echo " -D: Set model shapes to dynamic"
  exit 1
}
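# Example invocation (illustrative; any of the flags can be omitted):
#   ./ov_raw_mlir_bench.sh -t bf16 -b mlp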

# Cmd-line opts
while getopts "t:b:D" arg; do
  case ${arg} in
    t)
      DATA_TYPE=${OPTARG}
      ;;
    b)
      BASELINE_MODEL=${OPTARG}
      ;;
    D)
      IS_DYNAMIC=true
      ;;
    ?)
      echo "Invalid option: ${OPTARG}"
      die_syntax
      ;;
  esac
done
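# Options left unset fall back to defaults below: f32 data type and the
# default PyTorch MLP model.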

OV_ROOT=$(git rev-parse --show-toplevel)
BENCH_ROOT=$(realpath ${OV_ROOT}/tools/mlir_bench)

MODEL_GEN=$(realpath ${BENCH_ROOT}/ov_model_gen.py)
BENCH_RUNNER=tpp-run

# Initial validation.
if ! [ -d ${OV_ROOT} ]; then
  echo "Missing OV repo"
  exit 1
fi
if ! [ -d ${BENCH_ROOT} ]; then
  echo "Missing MLIR benchmark directory"
  exit 1
fi
if ! [ -f ${MODEL_GEN} ]; then
  echo "Missing model generator"
  exit 1
fi
if ! [ "$(command -v ${BENCH_RUNNER})" ]; then
  echo "Missing benchmark runner ${BENCH_RUNNER}"
  exit 1
fi
if [ ${IS_DYNAMIC} ]; then
  echo "Dynamic shapes are not supported by ${BENCH_RUNNER}"
  exit 1
fi

# Kernel config.
INPUT_SIZES=( 1024 2048 4096 8192 )
OUTPUT_SIZES=( 128 256 512 )
if [ ! "${DATA_TYPE}" ]; then
  DATA_TYPE="f32"
fi
MODEL_NAME="TPP_BENCH.xml"
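# The sizes above are swept pairwise below: for each (IN_SIZE, OUT_SIZE) pair,
# a model is generated, its MLIR is extracted, and it is timed with ${BENCH_RUNNER}.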

for OUT_SIZE in "${OUTPUT_SIZES[@]}"; do
  echo "MLP - OUT: ${OUT_SIZE} INS: ${INPUT_SIZES[*]}"
  for IN_SIZE in "${INPUT_SIZES[@]}"; do
    # Generate model.
    if [ "${BASELINE_MODEL}" ]; then
      # Enable baseline model flag.
      MODEL_CONFIG=(-b="${BASELINE_MODEL}[${OUT_SIZE},${OUT_SIZE},${IN_SIZE}]")
    else
      # Generate default PyTorch MLP.
      MODEL_CONFIG=(-l="linear[${IN_SIZE},${OUT_SIZE}] relu[]")
    fi
    GEN_FLAGS=(-t ${DATA_TYPE} -n ${MODEL_NAME})
    GEN_FLAGS+=(-p)
    MODEL_OUT=$(python3 ${MODEL_GEN} "${MODEL_CONFIG[@]}" "${GEN_FLAGS[@]}" 2>&1)
    if [ $? != 0 ]; then
      echo "Failed to generate model"
      exit 1
    fi
    # Run benchmark.
    # Extract the MLIR printed between the 'Source MLIR:' and 'Target LLVM:'
    # markers in the generator output, dropping separator lines of dashes.
    MLIR_IR=$(echo "${MODEL_OUT}" \
      | awk '/Source MLIR:/{flag=1; next} /Target LLVM:/{flag=0} flag' \
      | grep -vE '^[-]+$')
    BENCH_FLAGS="-entry-point-result=void -e entry -seed 123 -n 10000"
    echo "${MLIR_IR}" | ${BENCH_RUNNER} ${BENCH_FLAGS}
  done
done
