This commit is contained in:
louiscklaw
2025-01-31 22:10:02 +08:00
parent 97df42e0d5
commit 2627562070
2852 changed files with 748727 additions and 0 deletions

View File

@@ -0,0 +1,47 @@
trigger: none
pr:
autoCancel: true
drafts: false
branches:
include:
- master
paths:
include:
- neural_compressor
- setup.py
- requirements.txt
- .azure-pipelines/code-scan.yml
- .azure-pipelines/scripts/codeScan
- .azure-pipelines/template/docker-template.yml
pool:
vmImage: "ubuntu-latest"
variables:
CODE_SCAN_LOG_PATH: ".azure-pipelines/scripts/codeScan/scanLog"
stages:
- stage: DocStyleCodeScan
displayName: DocStyle Code Scan
dependsOn: []
jobs:
- job: DocStyle
displayName: DocStyle
steps:
- template: template/code-scan-template.yml
parameters:
codeScanFileName: "pydocstyle"
uploadPath: "pydocstyle.log"
- stage: BanditCodeScan
displayName: Bandit Code Scan
dependsOn: []
jobs:
- job: Bandit
displayName: Bandit
steps:
- template: template/code-scan-template.yml
parameters:
codeScanFileName: "bandit"
uploadPath: "bandit.log"

View File

@@ -0,0 +1,45 @@
#
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ARG UBUNTU_VER=22.04
FROM ubuntu:${UBUNTU_VER} as devel
# See http://bugs.python.org/issue19846
ENV LANG C.UTF-8
RUN apt-get update && apt-get install -y --no-install-recommends --fix-missing \
python3 \
python3-pip \
python3-dev \
python3-distutils \
autoconf \
build-essential \
git \
libgl1-mesa-glx \
libglib2.0-0 \
numactl \
time \
wget \
bc \
vim
RUN ln -sf $(which python3) /usr/bin/python
RUN python -m pip install pip==24.0
RUN python -m pip install --no-cache-dir setuptools
RUN pip list
WORKDIR /

View File

@@ -0,0 +1,38 @@
#
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
ARG UBUNTU_VER=22.04
FROM ubuntu:${UBUNTU_VER} as devel
# See http://bugs.python.org/issue19846
ENV LANG C.UTF-8
RUN apt-get update && apt-get install -y --no-install-recommends --fix-missing \
aspell \
aspell-en \
python3 \
python3-pip \
python3-dev \
python3-distutils \
wget
RUN ln -sf $(which python3) /usr/bin/python
RUN python -m pip install --no-cache-dir \
bandit \
pyspelling \
pydocstyle
WORKDIR /

View File

@@ -0,0 +1,119 @@
trigger: none
pr:
autoCancel: true
drafts: false
branches:
include:
- master
paths:
include:
- neural_compressor/common
- neural_compressor/torch
- examples/3.x_api/pytorch/nlp/huggingface_models/language-modeling/quantization/weight_only
- setup.py
- requirements_pt.txt
- .azure-pipelines/scripts/models
- .azure-pipelines/model-test-3x.yml
- .azure-pipelines/template/docker-template.yml
variables:
OUT_SCRIPT_PATH: $(Build.SourcesDirectory)/.azure-pipelines/scripts/models
SCRIPT_PATH: /neural-compressor/.azure-pipelines/scripts
parameters:
- name: PyTorch_Model_3X
displayName: Run PyTorch models?
type: boolean
default: true
- name: PyTorchModelList
type: object
default:
- opt_125m_woq_gptq_int4
- opt_125m_woq_gptq_nf4_dq_bnb
- opt_125m_woq_gptq_int4_dq_ggml
stages:
- stage: PyTorchModels
displayName: Run PyTorch Model
pool: ICX-16C
dependsOn: []
condition: and(succeeded(), eq('${{ parameters.PyTorch_Model_3X }}', 'true'))
jobs:
- ${{ each model in parameters.PyTorchModelList }}:
- job:
displayName: ${{ model }}
steps:
- template: template/model-template.yml
parameters:
modelName: ${{ model }}
framework: "pytorch"
APIVersion: "3x"
- stage: GenerateLogs
displayName: Generate Report
pool:
vmImage: "ubuntu-latest"
dependsOn: [PyTorchModels]
jobs:
- job: GenerateReport
steps:
- script: |
echo ${BUILD_SOURCESDIRECTORY}
rm -fr ${BUILD_SOURCESDIRECTORY} || sudo rm -fr ${BUILD_SOURCESDIRECTORY} || true
echo y | docker system prune --all
displayName: "Clean workspace"
- checkout: self
clean: true
displayName: "Checkout out Repo"
- task: DownloadPipelineArtifact@2
inputs:
artifact:
patterns: "**/*_summary.log"
path: $(OUT_SCRIPT_PATH)
- task: DownloadPipelineArtifact@2
inputs:
artifact:
patterns: "**/*_tuning_info.log"
path: $(OUT_SCRIPT_PATH)
- task: UsePythonVersion@0
displayName: "Use Python 3.10"
inputs:
versionSpec: "3.10"
- script: |
cd ${OUT_SCRIPT_PATH}
mkdir generated
mkdir last_generated
pip install requests
python -u collect_log_all.py --logs_dir $(OUT_SCRIPT_PATH) --output_dir generated --build_id=$(Build.BuildId)
displayName: "Collect all logs"
- task: DownloadPipelineArtifact@2
continueOnError: true
inputs:
source: "specific"
artifact: "FinalReport"
patterns: "**.log"
path: $(OUT_SCRIPT_PATH)/last_generated
project: $(System.TeamProject)
pipeline: "Model-Test"
runVersion: "specific"
runId: $(refer_buildId)
displayName: "Download last logs"
- script: |
echo "------ Generating final report.html ------"
cd ${OUT_SCRIPT_PATH}
/usr/bin/bash generate_report.sh --WORKSPACE generated --output_dir generated --last_logt_dir last_generated
displayName: "Generate report"
- task: PublishPipelineArtifact@1
inputs:
targetPath: $(OUT_SCRIPT_PATH)/generated
artifact: FinalReport
publishLocation: "pipeline"
displayName: "Publish report"
- script: |
if [ $(is_perf_reg) == 'true' ]; then
echo "Some benchmark regression occurred or the reference data need to be updated, please check artifacts and reports."
exit 1
fi
displayName: "Specify regression"

View File

@@ -0,0 +1,173 @@
trigger: none
pr:
autoCancel: true
drafts: false
branches:
include:
- master
paths:
include:
- neural_compressor
- setup.py
- requirements.txt
- .azure-pipelines/model-test.yml
- .azure-pipelines/template/docker-template.yml
- .azure-pipelines/scripts/models
- examples/tensorflow/oob_models/quantization/ptq
- .azure-pipelines/model-test.yml
- .azure-pipelines/scripts/fwk_version.sh
- .azure-pipelines/scripts/install_nc.sh
exclude:
- test
- neural_compressor/common
- neural_compressor/torch
- neural_compressor/tensorflow
- neural_compressor/onnxrt
pool: MODEL_PERF_TEST_TF
variables:
OUT_SCRIPT_PATH: $(Build.SourcesDirectory)/.azure-pipelines/scripts/models
SCRIPT_PATH: /neural-compressor/.azure-pipelines/scripts
parameters:
- name: TensorFlow_Model
displayName: Run TensorFlow models?
type: boolean
default: true
- name: PyTorch_Model
displayName: Run PyTorch models?
type: boolean
default: true
- name: ONNX_Model
displayName: Run ONNX models?
type: boolean
default: true
- name: TensorFlowModelList
type: object
default:
- resnet50v1.5
- ssd_resnet50_v1
- name: PyTorchModelList
type: object
default:
- resnet18_fx
- name: ONNXModelList
type: object
default:
- resnet50-v1-12
stages:
- stage: TensorFlowModels
displayName: Run TensorFlow Model
pool: MODEL_PERF_TEST
dependsOn: []
condition: and(succeeded(), eq('${{ parameters.TensorFlow_Model }}', 'true'))
jobs:
- ${{ each model in parameters.TensorFlowModelList }}:
- job:
displayName: ${{ model }}
steps:
- template: template/model-template.yml
parameters:
modelName: ${{ model }}
framework: "tensorflow"
- stage: PyTorchModels
displayName: Run PyTorch Model
pool: MODEL_PERF_TEST
dependsOn: []
condition: and(succeeded(), eq('${{ parameters.PyTorch_Model }}', 'true'))
jobs:
- ${{ each model in parameters.PyTorchModelList }}:
- job:
displayName: ${{ model }}
steps:
- template: template/model-template.yml
parameters:
modelName: ${{ model }}
framework: "pytorch"
- stage: ONNXModels
displayName: Run ONNX Model
pool: MODEL_PERF_TEST
dependsOn: []
condition: and(succeeded(), eq('${{ parameters.ONNX_Model }}', 'true'))
jobs:
- ${{ each model in parameters.ONNXModelList }}:
- job:
displayName: ${{ model }}
steps:
- template: template/model-template.yml
parameters:
modelName: ${{ model }}
framework: "onnxrt"
- stage: GenerateLogs
displayName: Generate Report
pool:
vmImage: "ubuntu-latest"
dependsOn: [TensorFlowModels, PyTorchModels, ONNXModels]
jobs:
- job: GenerateReport
steps:
- script: |
echo ${BUILD_SOURCESDIRECTORY}
rm -fr ${BUILD_SOURCESDIRECTORY} || sudo rm -fr ${BUILD_SOURCESDIRECTORY} || true
echo y | docker system prune --all
displayName: "Clean workspace"
- checkout: self
clean: true
displayName: "Checkout out Repo"
- task: DownloadPipelineArtifact@2
inputs:
artifact:
patterns: "**/*_summary.log"
path: $(OUT_SCRIPT_PATH)
- task: DownloadPipelineArtifact@2
inputs:
artifact:
patterns: "**/*_tuning_info.log"
path: $(OUT_SCRIPT_PATH)
- task: UsePythonVersion@0
displayName: "Use Python 3.10"
inputs:
versionSpec: "3.10"
- script: |
cd ${OUT_SCRIPT_PATH}
mkdir generated
mkdir last_generated
pip install requests
python -u collect_log_all.py --logs_dir $(OUT_SCRIPT_PATH) --output_dir generated --build_id=$(Build.BuildId)
displayName: "Collect all logs"
- task: DownloadPipelineArtifact@2
continueOnError: true
inputs:
source: "specific"
artifact: "FinalReport"
patterns: "**.log"
path: $(OUT_SCRIPT_PATH)/last_generated
project: $(System.TeamProject)
pipeline: "Model-Test"
runVersion: "specific"
runId: $(refer_buildId)
displayName: "Download last logs"
- script: |
echo "------ Generating final report.html ------"
cd ${OUT_SCRIPT_PATH}
/usr/bin/bash generate_report.sh --WORKSPACE generated --output_dir generated --last_logt_dir last_generated
displayName: "Generate report"
- task: PublishPipelineArtifact@1
inputs:
targetPath: $(OUT_SCRIPT_PATH)/generated
artifact: FinalReport
publishLocation: "pipeline"
displayName: "Publish report"
- script: |
if [ $(is_perf_reg) == 'true' ]; then
echo "Some benchmark regression occurred or the reference data need to be updated, please check artifacts and reports."
exit 1
fi
displayName: "Specify regression"

View File

@@ -0,0 +1,81 @@
#!/bin/bash
# -------------- general approach start ----------------
# 1. source this file:
#    source path/change_color.sh
# 2. use a COLOR/BG variable:
#    $VARIABLE_NAME && output_content && $RESET
# 3. combine COLOR + BG:
#    $COLOR_VARIABLE_NAME && $BG_VARIABLE_NAME && output_content && $RESET
# 4. custom combinations (change the numbers):
#    text color range: 30-37
#    background color range: 40-47
#    special effects range: 1-7
#    echo -en "\E[number1;number2;number3m"
#    e.g. BG_GRAY + LIGHT_RED = echo -en "\E[47;31m"
# -------------- general approach end ----------------
# general setting
# ------------- light_color start----------------
# black
LIGHT_BLACK="echo -en \\E[30m"
# red
LIGHT_RED="echo -en \\E[31m"
# green
LIGHT_GREEN="echo -en \\E[32m"
# yellow
LIGHT_YELLOW="echo -en \\E[33m"
# blue
LIGHT_BLUE="echo -en \\E[34m"
# purple
LIGHT_PURPLE="echo -en \\E[35m"
# cyan
LIGHT_CYAN="echo -en \\E[36m"
# gray
LIGHT_GRAY="echo -en \\E[37m"
# ------------- light_color end----------------
# ------------- bold_color start----------------
# black
BOLD_BLACK="echo -en \\E[1;30m"
# red
BOLD_RED="echo -en \\E[1;31m"
# green
BOLD_GREEN="echo -en \\E[1;32m"
# yellow
BOLD_YELLOW="echo -en \\E[1;33m"
# blue
BOLD_BLUE="echo -en \\E[1;34m"
# purple
BOLD_PURPLE="echo -en \\E[1;35m"
# cyan
BOLD_CYAN="echo -en \\E[1;36m"
# gray
BOLD_GRAY="echo -en \\E[1;37m"
# ------------- bold_color end----------------
# ------------- background_color start----------------
# black
BG_BLACK="echo -en \\E[40m"
# red
BG_RED="echo -en \\E[41m"
# green
BG_GREEN="echo -en \\E[42m"
# yellow
BG_YELLOW="echo -en \\E[43m"
# blue
BG_BLUE="echo -en \\E[44m"
# purple
BG_PURPLE="echo -en \\E[45m"
# cyan
BG_CYAN="echo -en \\E[46m"
# gray
BG_GRAY="echo -en \\E[47m"
# ------------- background_color end----------------
# close
RESET="echo -en \\E[0m"

View File

@@ -0,0 +1,34 @@
#!/bin/bash
for var in "$@"
do
case $var in
--scan_module=*)
scan_module=$(echo $var |cut -f2 -d=)
;;
esac
done
source /neural-compressor/.azure-pipelines/scripts/change_color.sh
RESET="echo -en \\E[0m \\n" # close color
log_dir="/neural-compressor/.azure-pipelines/scripts/codeScan/scanLog"
mkdir -p $log_dir
python -m bandit -r -lll -iii "/neural-compressor/${scan_module}" >$log_dir/bandit.log
exit_code=$?
$BOLD_YELLOW && echo " ----------------- Current bandit cmd start --------------------------" && $RESET
echo "python -m bandit -r -lll -iii /neural-compressor/${scan_module} > $log_dir/bandit.log"
$BOLD_YELLOW && echo " ----------------- Current bandit cmd end --------------------------" && $RESET
$BOLD_YELLOW && echo " ----------------- Current log file output start --------------------------"
cat $log_dir/bandit.log
$BOLD_YELLOW && echo " ----------------- Current log file output end --------------------------" && $RESET
if [ ${exit_code} -ne 0 ]; then
$BOLD_RED && echo "Error!! Please Click on the artifact button to download and view Bandit error details." && $RESET
exit 1
fi
$BOLD_PURPLE && echo "Congratulations, Bandit check passed!" && $LIGHT_PURPLE && echo " You can click on the artifact button to see the log details." && $RESET
exit 0
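# Illustrative invocation (the script file name used here is hypothetical; the code-scan template is assumed to pass the module name):
#   bash bandit.sh --scan_module=neural_compressor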

View File

@@ -0,0 +1,15 @@
activ
ans
assertin
datas
ende
lates
masia
mutli
nd
ot
rouge
te
tne
ue
womens

View File

@@ -0,0 +1,43 @@
#!/bin/bash
for var in "$@"
do
case $var in
--scan_module=*)
scan_module=$(echo $var |cut -f2 -d=)
;;
esac
done
source /neural-compressor/.azure-pipelines/scripts/change_color.sh
RESET="echo -en \\E[0m \\n" # close color
work_dir="/neural-compressor/.azure-pipelines/scripts/codeScan/pydocstyle"
log_dir="$work_dir/../scanLog"
mkdir -p $log_dir
scan_path="scan_path.txt"
exit_code=0
for line in $(cat ${work_dir}/${scan_path})
do
pydocstyle --convention=google $line >> $log_dir/pydocstyle.log
if [ $? -ne 0 ]; then
exit_code=1
fi
done
$BOLD_YELLOW && echo " ----------------- Current pydocstyle cmd start --------------------------" && $RESET
echo "pydocstyle --convention=google \$line > $log_dir/pydocstyle.log"
$BOLD_YELLOW && echo " ----------------- Current pydocstyle cmd end --------------------------" && $RESET
$BOLD_YELLOW && echo " ----------------- Current log file output start --------------------------"
cat $log_dir/pydocstyle.log
$BOLD_YELLOW && echo " ----------------- Current log file output end --------------------------" && $RESET
if [ ${exit_code} -ne 0 ]; then
$BOLD_RED && echo "Error!! Please Click on the artifact button to download and view DocStyle error details." && $RESET
exit 1
fi
$BOLD_PURPLE && echo "Congratulations, DocStyle check passed!" && $LIGHT_PURPLE && echo " You can click on the artifact button to see the log details." && $RESET
exit 0

View File

@@ -0,0 +1,27 @@
/neural-compressor/neural_compressor/adaptor/mxnet_utils
/neural-compressor/neural_compressor/adaptor/ox_utils
/neural-compressor/neural_compressor/adaptor/tensorflow.py
/neural-compressor/neural_compressor/adaptor/tf_utils
/neural-compressor/neural_compressor/algorithm
/neural-compressor/neural_compressor/benchmark.py
/neural-compressor/neural_compressor/config.py
/neural-compressor/neural_compressor/contrib
/neural-compressor/neural_compressor/experimental
/neural-compressor/neural_compressor/mix_precision.py
/neural-compressor/neural_compressor/model
/neural-compressor/neural_compressor/objective.py
/neural-compressor/neural_compressor/pruner
/neural-compressor/neural_compressor/quantization.py
/neural-compressor/neural_compressor/strategy
/neural-compressor/neural_compressor/training.py
/neural-compressor/neural_compressor/utils
/neural-compressor/neural_compressor/common
/neural-compressor/neural_compressor/tensorflow
/neural-compressor/neural_compressor/torch/algorithms/layer_wise
/neural-compressor/neural_compressor/torch/algorithms/mixed_precision
/neural-compressor/neural_compressor/torch/algorithms/mx_quant
/neural-compressor/neural_compressor/torch/algorithms/pt2e_quant
/neural-compressor/neural_compressor/torch/algorithms/smooth_quant
/neural-compressor/neural_compressor/torch/algorithms/static_quant
/neural-compressor/neural_compressor/torch/algorithms/weight_only
/neural-compressor/neural_compressor/torch/export

View File

@@ -0,0 +1,10 @@
#!/bin/bash
echo "export FWs version..."
export tensorflow_version='2.15.0-official'
export pytorch_version='2.5.1+cpu'
export torchvision_version='0.20.1'
export ipex_version='2.5.0+cpu'
export onnx_version='1.17.0'
export onnxruntime_version='1.20.0'
export mxnet_version='1.9.1'
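# Illustrative usage (assumption: downstream pipeline scripts source this file and read the exported versions):
#   source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh
#   echo "Testing against TensorFlow ${tensorflow_version} and PyTorch ${pytorch_version}"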

View File

@@ -0,0 +1,31 @@
#!/bin/bash
echo -e "##[group]Install Neural Compressor ... "
cd /neural-compressor
if [[ $1 = *"3x_pt"* ]]; then
python -m pip install --no-cache-dir -r requirements_pt.txt
if [[ $1 = *"3x_pt_fp8"* ]]; then
pip uninstall neural_compressor_3x_pt -y || true
python setup.py pt bdist_wheel
else
echo -e "\n Install torch CPU ... "
pip install torch==2.5.1 --index-url https://download.pytorch.org/whl/cpu
python -m pip install intel-extension-for-pytorch==2.5.0 oneccl_bind_pt==2.5.0 --extra-index-url https://pytorch-extension.intel.com/release-whl/stable/cpu/us/
python -m pip install --no-cache-dir -r requirements.txt
python setup.py bdist_wheel
fi
pip install --no-deps dist/neural_compressor*.whl --force-reinstall
elif [[ $1 = *"3x_tf"* ]]; then
python -m pip install --no-cache-dir -r requirements.txt
python -m pip install --no-cache-dir -r requirements_tf.txt
python setup.py bdist_wheel
pip install dist/neural_compressor*.whl --force-reinstall
else
python -m pip install --no-cache-dir -r requirements.txt
python setup.py bdist_wheel
pip install dist/neural_compressor*.whl --force-reinstall
fi
echo -e "\n pip list after install Neural Compressor ... "
echo "##[endgroup]"
pip list
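# Illustrative usage (mirrors the call in run_model_trigger_common.sh; the argument selects the 3.x PyTorch, 3.x TensorFlow, or default install path):
#   /bin/bash /neural-compressor/.azure-pipelines/scripts/install_nc.sh 3x_pt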

View File

@@ -0,0 +1,79 @@
import argparse
import os
import requests
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("--logs_dir", type=str, default=".")
parser.add_argument("--output_dir", type=str, default=".")
parser.add_argument("--build_id", type=str, default="0")
args = parser.parse_args()
print(args)
def main():
file_dir = args.logs_dir
summary_content = ["OS;Platform;Framework;Version;Precision;Model;Mode;Type;BS;Value;Url\n"]
tuning_info_content = ["OS;Platform;Framework;Version;Model;Strategy;Tune_time\n"]
url_dict = parse_download_url()
# get full path of all files
for root, dirs, files in os.walk(file_dir):
for name in files:
file_name = os.path.join(root, name)
print(file_name)
if "_summary.log" in name:
for line in open(file_name, "r"):
if "linux" in line:
line = line.replace("<url>", parse_summary_log(line, url_dict))
summary_content.append(line)
if "_tuning_info.log" in name:
for line in open(file_name, "r"):
if "linux" in line:
line = line.replace("<url>", parse_tuning_log(line, url_dict))
tuning_info_content.append(line)
f = open(args.output_dir + "/summary.log", "a")
for summary in summary_content:
f.writelines(str(summary))
f2 = open(args.output_dir + "/tuning_info.log", "a")
for tuning_info in tuning_info_content:
f2.writelines(str(tuning_info))
def parse_tuning_log(line, url_dict):
"""Parsing {Framework}-{Model}-tune.log to get tuning result."""
result = line.split(";")
OS, Platform, Framework, Version, Model, Strategy, Tune_time, Tuning_trials, URL, __ = result
file_name = f"{Framework}-{Model}-tune.log"
download_url = url_dict.get(f"{Framework}_{Model}")
download_url = f"{download_url}{file_name}"
return download_url
def parse_summary_log(line, url_dict):
"""Parse {Framework}-{Model}-tune.log to get benchmarking accuracy result."""
result = line.split(";")
OS, Platform, Framework, Version, Precision, Model, Mode, Type, BS, Value, Url = result
file_name = f"{Framework}-{Model}-tune.log"
download_url = url_dict.get(f"{Framework}_{Model}")
download_url = f"{download_url}{file_name}"
return download_url
def parse_download_url():
"""Get azure artifact information."""
azure_artifact_api_url = (
f"https://dev.azure.com/lpot-inc/neural-compressor/_apis/build/builds/{args.build_id}/artifacts?api-version=5.1"
)
azure_artifacts_data = dict(requests.get(azure_artifact_api_url).json().items())
artifact_count = azure_artifacts_data.get("count")
artifact_value = azure_artifacts_data.get("value")
url_dict = {}
for item in artifact_value:
artifact_download_url = item.get("resource").get("downloadUrl")
artifact_download_url = f"{artifact_download_url[:-3]}file&subPath=%2F"
url_dict[item.get("name")] = artifact_download_url
return url_dict
if __name__ == "__main__":
main()
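# Illustrative invocation (mirrors the "Collect all logs" pipeline step above; the paths and build id are examples):
#   python -u collect_log_all.py --logs_dir ./models --output_dir generated --build_id 12345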

View File

@@ -0,0 +1,309 @@
import argparse
import os
import re
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("--framework", type=str, required=True)
parser.add_argument("--fwk_ver", type=str, required=True)
parser.add_argument("--model", type=str, required=True)
parser.add_argument("--logs_dir", type=str, default=".")
parser.add_argument("--output_dir", type=str, default=".")
parser.add_argument("--build_id", type=str, default="0")
parser.add_argument("--stage", type=str, default="collect_log")
parser.add_argument("--gap", type=float, default=0.05)
parser.add_argument("--inc_new_api", type=str, default="")
args = parser.parse_args()
print("====== collecting model test log =======")
OS = "linux"
PLATFORM = "icx"
URL = (
"https://dev.azure.com/lpot-inc/neural-compressor/_build/results?buildId="
+ args.build_id
+ "&view=artifacts&pathAsName=false&type=publishedArtifacts"
)
OOB_MODEL_LIST = ["darknet19", "densenet-121", "resnet-101"]
def get_model_tuning_dict_results():
tuning_result_dict = {}
if os.path.exists(tuning_log):
print("tuning log found")
tmp = {"fp32_acc": 0, "int8_acc": 0, "tuning_trials": 0}
with open(tuning_log, "r") as f:
for line in f:
parse_tuning_line(line, tmp)
print(tmp)
tuning_result_dict = {
"OS": OS,
"Platform": PLATFORM,
"Framework": args.framework,
"Version": args.fwk_ver,
"Model": args.model,
"Strategy": tmp.get("strategy", "basic"),
"Tune_time": tmp.get("tune_time"),
}
benchmark_accuracy_result_dict = {
"int8": {
"OS": OS,
"Platform": PLATFORM,
"Framework": args.framework,
"Version": args.fwk_ver,
"Model": args.model,
"Mode": "Inference",
"Type": "Accuracy",
"BS": 1,
"Value": tmp.get("int8_acc"),
"Url": URL,
},
"fp32": {
"OS": OS,
"Platform": PLATFORM,
"Framework": args.framework,
"Version": args.fwk_ver,
"Model": args.model,
"Mode": "Inference",
"Type": "Accuracy",
"BS": 1,
"Value": tmp.get("fp32_acc"),
"Url": URL,
},
}
return tuning_result_dict, benchmark_accuracy_result_dict
else:
return {}, {}
def get_model_benchmark_dict_results():
benchmark_performance_result_dict = {"int8": {}, "fp32": {}}
for precision in ["int8", "fp32"]:
throughput = 0.0
bs = 1
for root, dirs, files in os.walk(args.logs_dir):
for name in files:
file_name = os.path.join(root, name)
if "performance-" + precision in name:
for line in open(file_name, "r"):
result = parse_perf_line(line)
if result.get("throughput"):
throughput += result.get("throughput")
if result.get("batch_size"):
bs = result.get("batch_size")
benchmark_performance_result_dict[precision] = {
"OS": OS,
"Platform": PLATFORM,
"Framework": args.framework,
"Version": args.fwk_ver,
"Model": args.model,
"Mode": "Inference",
"Type": "Performance",
"BS": 1,
"Value": throughput,
"Url": URL,
}
return benchmark_performance_result_dict
def get_refer_data():
refer_log = os.path.join(f"{args.logs_dir}_refer_log", f"{args.framework}_{args.model}_summary.log")
result = {}
if os.path.exists(refer_log):
with open(refer_log, "r") as f:
lines = f.readlines()
keys = lines[0].split(";")
values = [lines[i].split(";") for i in range(1, len(lines))]
for value in values:
precision = value[keys.index("Precision")]
Type = value[keys.index("Type")]
result[f"{precision}_{Type}"] = (
float(value[keys.index("Value")]) if value[keys.index("Value")] != "unknown" else "unknown"
)
return result
else:
print(f"refer log file: {refer_log} not found")
return 0
def collect_log():
results = []
tuning_infos = []
print(f"quantization log dir is {tuning_log}")
# get model tuning results
if os.path.exists(tuning_log):
print("quantization log found")
tmp = {"fp32_acc": 0, "int8_acc": 0, "tuning_trials": 0}
with open(tuning_log, "r") as f:
for line in f:
parse_tuning_line(line, tmp)
print(tmp)
# OOB models do not report accuracy
if (args.model in OOB_MODEL_LIST) and args.framework == "tensorflow":
tmp["fp32_acc"], tmp["int8_acc"] = "unknown", "unknown"
# settings for 3.x weight-only quantization (WOQ) models
if args.inc_new_api.split("_")[0] == "3x":
tmp["fp32_acc"], tmp["tuning_trials"], tmp["strategy"] = "unknown", "", ""
if "acc_bs" in tmp:
acc_bs = tmp["acc_bs"]
else:
acc_bs = 1
results.append(
"{};{};{};{};FP32;{};Inference;Accuracy;{};{};{}\n".format(
OS, PLATFORM, args.framework, args.fwk_ver, args.model, acc_bs, tmp["fp32_acc"], "<url>"
)
)
results.append(
"{};{};{};{};INT8;{};Inference;Accuracy;{};{};{}\n".format(
OS, PLATFORM, args.framework, args.fwk_ver, args.model, acc_bs, tmp["int8_acc"], "<url>"
)
)
tuning_infos.append(
";".join(
[
OS,
PLATFORM,
args.framework,
args.fwk_ver,
args.model,
tmp.get("strategy", "basic"),
str(tmp["tune_time"]),
str(tmp["tuning_trials"]),
"<url>",
f"{round(tmp['max_mem_size'] / tmp['total_mem_size'] * 100, 4)}%",
]
)
+ "\n"
)
# get model benchmark results
if args.inc_new_api.split("_")[0] != "3x":
for precision in ["int8", "fp32"]:
throughput = 0.0
bs = 1
for root, dirs, files in os.walk(args.logs_dir):
for name in files:
file_name = os.path.join(root, name)
print(file_name)
if "performance-" + precision in name:
for line in open(file_name, "r"):
result = parse_perf_line(line)
if result.get("throughput"):
throughput += result.get("throughput")
if result.get("batch_size"):
bs = result.get("batch_size")
results.append(
"{};{};{};{};{};{};Inference;Performance;{};{};{}\n".format(
OS, PLATFORM, args.framework, args.fwk_ver, precision.upper(), args.model, bs, throughput, URL
)
)
# write model logs
f = open(args.output_dir + "/" + args.framework + "_" + args.model + "_summary.log", "a")
f.writelines("OS;Platform;Framework;Version;Precision;Model;Mode;Type;BS;Value;Url\n")
for result in results:
f.writelines(str(result))
f2 = open(args.output_dir + "/" + args.framework + "_" + args.model + "_tuning_info.log", "a")
f2.writelines("OS;Platform;Framework;Version;Model;Strategy;Tune_time\n")
for tuning_info in tuning_infos:
f2.writelines(str(tuning_info))
def parse_tuning_line(line, tmp):
tuning_strategy = re.search(r"Tuning strategy:\s+([A-Za-z]+)", line)
if tuning_strategy and tuning_strategy.group(1):
tmp["strategy"] = tuning_strategy.group(1)
baseline_acc = re.search(
r"FP32 baseline is:\s+\[Accuracy:\s(\d+(\.\d+)?), Duration \(seconds\):\s*(\d+(\.\d+)?)\]", line
)
if baseline_acc and baseline_acc.group(1):
tmp["fp32_acc"] = float(baseline_acc.group(1))
tuned_acc = re.search(
r"Best tune result is:\s+\[Accuracy:\s(\d+(\.\d+)?), Duration \(seconds\):\s(\d+(\.\d+)?)\]", line
)
if tuned_acc and tuned_acc.group(1):
tmp["int8_acc"] = float(tuned_acc.group(1))
if args.inc_new_api.split("_")[0] == "3x":
quant_acc = re.search(r"Accuracy:\s+(\d+(\.\d+)?)", line)
if quant_acc and quant_acc.group(1):
tmp["int8_acc"] = float(quant_acc.group(1))
batch_size = re.search(r"Batch size = ([0-9]+)", line)
if batch_size and batch_size.group(1):
tmp["acc_bs"] = int(batch_size.group(1))
tune_trial = re.search(r"Tune \d*\s*result is:", line)
if tune_trial:
tmp["tuning_trials"] += 1
tune_time = re.search(r"Tuning time spend:\s+(\d+(\.\d+)?)s", line)
if tune_time and tune_time.group(1):
tmp["tune_time"] = int(tune_time.group(1))
fp32_model_size = re.search(r"The input model size is:\s+(\d+(\.\d+)?)", line)
if fp32_model_size and fp32_model_size.group(1):
tmp["fp32_model_size"] = int(fp32_model_size.group(1))
int8_model_size = re.search(r"The output model size is:\s+(\d+(\.\d+)?)", line)
if int8_model_size and int8_model_size.group(1):
tmp["int8_model_size"] = int(int8_model_size.group(1))
total_mem_size = re.search(r"Total resident size\D*([0-9]+)", line)
if total_mem_size and total_mem_size.group(1):
tmp["total_mem_size"] = float(total_mem_size.group(1))
max_mem_size = re.search(r"Maximum resident set size\D*([0-9]+)", line)
if max_mem_size and max_mem_size.group(1):
tmp["max_mem_size"] = float(max_mem_size.group(1))
def parse_perf_line(line):
perf_data = {}
throughput = re.search(r"Throughput:\s+(\d+(\.\d+)?)", line)
if throughput and throughput.group(1):
perf_data.update({"throughput": float(throughput.group(1))})
batch_size = re.search(r"Batch size = ([0-9]+)", line)
if batch_size and batch_size.group(1):
perf_data.update({"batch_size": int(batch_size.group(1))})
return perf_data
def check_status(precision, precision_upper, check_accuracy=False):
performance_result = get_model_benchmark_dict_results()
current_performance = performance_result.get(precision).get("Value")
refer_performance = refer.get(f"{precision_upper}_Performance")
print(f"current_performance_data = {current_performance:.3f}, refer_performance_data = {refer_performance:.3f}")
assert (refer_performance - current_performance) / refer_performance <= args.gap
if check_accuracy:
_, accuracy_result = get_model_tuning_dict_results()
current_accuracy = accuracy_result.get(precision).get("Value")
refer_accuracy = refer.get(f"{precision_upper}_Accuracy")
print(f"current_accuracy_data = {current_accuracy:.3f}, refer_accuarcy_data = {refer_accuracy:.3f}")
assert abs(current_accuracy - refer_accuracy) <= 0.001
if __name__ == "__main__":
tuning_log = os.path.join(args.logs_dir, f"{args.framework}-{args.model}-tune.log")
refer = get_refer_data()
if args.stage == "collect_log":
collect_log()
elif args.stage == "int8_benchmark" and refer:
check_status("int8", "INT8")
elif args.stage == "fp32_benchmark" and refer:
check_status("fp32", "FP32")
elif not refer:
print("skip check status")
else:
raise ValueError(f"{args.stage} does not exist")
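# Illustrative invocation (mirrors check_perf_gap in run_benchmark_common.sh; the concrete values are examples):
#   python -u collect_log_model.py --framework=tensorflow --fwk_ver=2.15.0 --model=resnet50v1.5 \
#       --logs_dir=./logs --output_dir=./logs --build_id=12345 --stage=int8_benchmark --gap=0.05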

View File

@@ -0,0 +1,147 @@
#!/bin/bash
set -eo pipefail
source /neural-compressor/.azure-pipelines/scripts/change_color.sh
# get parameters
PATTERN='[-a-zA-Z0-9_]*='
for i in "$@"; do
case $i in
--yaml=*)
yaml=$(echo $i | sed "s/${PATTERN}//")
;;
--framework=*)
framework=$(echo $i | sed "s/${PATTERN}//")
;;
--fwk_ver=*)
fwk_ver=$(echo $i | sed "s/${PATTERN}//")
;;
--torch_vision_ver=*)
torch_vision_ver=$(echo $i | sed "s/${PATTERN}//")
;;
--model=*)
model=$(echo $i | sed "s/${PATTERN}//")
;;
--model_src_dir=*)
model_src_dir=$(echo $i | sed "s/${PATTERN}//")
;;
--dataset_location=*)
dataset_location=$(echo $i | sed "s/${PATTERN}//")
;;
--batch_size=*)
batch_size=$(echo $i | sed "s/${PATTERN}//")
;;
--strategy=*)
strategy=$(echo $i | sed "s/${PATTERN}//")
;;
--new_benchmark=*)
new_benchmark=$(echo $i | sed "s/${PATTERN}//")
;;
--inc_new_api=*)
inc_new_api=$(echo $i | sed "s/${PATTERN}//")
;;
*)
echo "Parameter $i not recognized."
exit 1
;;
esac
done
SCRIPTS_PATH="/neural-compressor/.azure-pipelines/scripts/models"
log_dir="/neural-compressor/.azure-pipelines/scripts/models"
if [[ "${inc_new_api}" == "3x"* ]]; then
WORK_SOURCE_DIR="/neural-compressor/examples/3.x_api/${framework}"
git clone https://github.com/intel/intel-extension-for-transformers.git /itrex
cd /itrex
pip install -r requirements.txt
pip install -v .
else
WORK_SOURCE_DIR="/neural-compressor/examples/${framework}"
fi
$BOLD_YELLOW && echo "processing ${framework}-${fwk_ver}-${model}" && $RESET
$BOLD_YELLOW && echo "======= creat log_dir =========" && $RESET
if [ -d "${log_dir}/${model}" ]; then
$BOLD_GREEN && echo "${log_dir}/${model} already exists, don't need to mkdir." && $RESET
else
$BOLD_GREEN && echo "no log dir ${log_dir}/${model}, create." && $RESET
cd ${log_dir}
mkdir ${model}
fi
$BOLD_YELLOW && echo "====== install requirements ======" && $RESET
/bin/bash /neural-compressor/.azure-pipelines/scripts/install_nc.sh ${inc_new_api}
mkdir -p ${WORK_SOURCE_DIR}
cd ${WORK_SOURCE_DIR}
if [[ "${inc_new_api}" == "false" ]]; then
echo "copy old api examples to workspace..."
git clone -b old_api_examples https://github.com/intel/neural-compressor.git old-lpot-models
cd old-lpot-models
git branch
cd -
rm -rf ${model_src_dir}
mkdir -p ${model_src_dir}
cp -r old-lpot-models/examples/${framework}/${model_src_dir} ${WORK_SOURCE_DIR}/${model_src_dir}/../
fi
cd ${model_src_dir}
if [[ "${fwk_ver}" != "latest" ]]; then
pip install ruamel.yaml==0.17.40
pip install psutil
pip install protobuf==4.23.4
if [[ "${framework}" == "tensorflow" ]]; then
if [[ "${fwk_ver}" == *"-official" ]]; then
pip install tensorflow==${fwk_ver%-official}
else
pip install intel-tensorflow==${fwk_ver}
fi
elif [[ "${framework}" == "pytorch" ]]; then
pip install torch==${fwk_ver} --index-url https://download.pytorch.org/whl/cpu
pip install torchvision==${torch_vision_ver} --index-url https://download.pytorch.org/whl/cpu
elif [[ "${framework}" == "onnxrt" ]]; then
pip install onnx==1.15.0
pip install onnxruntime==${fwk_ver}
fi
fi
if [ -f "requirements.txt" ]; then
sed -i '/neural-compressor/d' requirements.txt
if [ "${framework}" == "onnxrt" ]; then
sed -i '/^onnx>=/d;/^onnx==/d;/^onnxruntime>=/d;/^onnxruntime==/d' requirements.txt
fi
if [ "${framework}" == "tensorflow" ]; then
sed -i '/tensorflow==/d;/tensorflow$/d' requirements.txt
sed -i '/^intel-tensorflow/d' requirements.txt
fi
if [ "${framework}" == "pytorch" ]; then
sed -i '/torch==/d;/torch$/d;/torchvision==/d;/torchvision$/d' requirements.txt
fi
n=0
until [ "$n" -ge 5 ]; do
python -m pip install -r requirements.txt && break
n=$((n + 1))
sleep 5
done
pip list
else
$BOLD_RED && echo "Not found requirements.txt file." && $RESET
fi
if [[ "${inc_new_api}" == "false" ]]; then
$BOLD_YELLOW && echo "======== update yaml config ========" && $RESET
$BOLD_YELLOW && echo -e "\nPrint origin yaml..." && $RESET
cat ${yaml}
python ${SCRIPTS_PATH}/update_yaml_config.py \
--yaml=${yaml} \
--framework=${framework} \
--dataset_location=${dataset_location} \
--batch_size=${batch_size} \
--strategy=${strategy} \
--new_benchmark=${new_benchmark} \
--multi_instance='true'
$BOLD_YELLOW && echo -e "\nPrint updated yaml... " && $RESET
cat ${yaml}
fi

View File

@@ -0,0 +1,625 @@
#!/bin/bash
# WORKSPACE=.
# summaryLog=summary.log
# summaryLogLast=summary.log
# tuneLog=tuning_info.log
# tuneLogLast=tuning_info.log
# overview_log=summary_overview.log
# coverage_summary=coverage_summary.log
# nc_code_lines_summary=nc_code_lines_summary.csv
# engine_code_lines_summary=engine_code_lines_summary.csv
#lines_coverage_threshold=80
#branches_coverage_threshold=75
#
#pass_status="<td style=\"background-color:#90EE90\">Pass</td>"
#fail_status="<td style=\"background-color:#FFD2D2\">Fail</td>"
#verify_status="<td style=\"background-color:#f2ea0a\">Verify</td>"
# shellcheck disable=SC2120
while [[ $# -gt 0 ]];do
key=${1}
case ${key} in
-w|--WORKSPACE)
WORKSPACE=${2}
shift 2
;;
--script_path)
script_path=${2}
shift 2
;;
--output_dir)
output_dir=${2}
shift 2
;;
--last_logt_dir)
last_logt_dir=${2}
shift 2
;;
*)
shift
;;
esac
done
echo "workspace: ${WORKSPACE}"
echo "script_path: ${script_path}"
summaryLog="${WORKSPACE}/summary.log"
tuneLog="${WORKSPACE}/tuning_info.log"
echo "summaryLog: ${summaryLog}"
echo "tuneLog: ${tuneLog}"
echo "last_logt_dir: ${last_logt_dir}"
summaryLogLast="${last_logt_dir}/summary.log"
tuneLogLast="${last_logt_dir}/tuning_info.log"
echo "summaryLogLast: ${summaryLogLast}"
echo "tuneLogLast: ${tuneLogLast}"
ghprbPullId=${SYSTEM_PULLREQUEST_PULLREQUESTNUMBER}
MR_source_branch=${SYSTEM_PULLREQUEST_SOURCEBRANCH}
MR_source_repo=${SYSTEM_PULLREQUEST_SOURCEREPOSITORYURI}
MR_target_branch=${SYSTEM_PULLREQUEST_TARGETBRANCH}
repo_url=${BUILD_REPOSITORY_URI}
source_commit_id=${BUILD_SOURCEVERSION}
build_id=${BUILD_BUILDID}
echo "MR_source_branch: ${MR_source_branch}"
echo "MR_source_repo: ${MR_source_repo}"
echo "MR_target_branch: ${MR_target_branch}"
echo "repo_url: ${repo_url}"
echo "commit_id: ${source_commit_id}"
echo "ghprbPullId: ${ghprbPullId}"
echo "build_id: ${build_id}"
function main {
generate_html_head
generate_html_body
generate_results
generate_html_footer
}
function generate_inference {
# echo "Generating inference"
awk -v framework="${framework}" -v fw_version="${fw_version}" -v model="${model}" -v os="${os}" -v platform=${platform} -F ';' '
BEGIN {
fp32_perf_bs = "nan";
fp32_perf_value = "nan";
fp32_perf_url = "nan";
fp32_acc_bs = "nan";
fp32_acc_value = "nan";
fp32_acc_url = "nan";
int8_perf_bs = "nan";
int8_perf_value = "nan";
int8_perf_url = "nan";
int8_acc_bs = "nan";
int8_acc_value = "nan";
int8_acc_url = "nan";
}{
if($1 == os && $2 == platform && $3 == framework && $4 == fw_version && $6 == model) {
// FP32
if($5 == "FP32") {
// Performance
if($8 == "Performance") {
fp32_perf_bs = $9;
fp32_perf_value = $10;
fp32_perf_url = $11;
}
// Accuracy
if($8 == "Accuracy") {
fp32_acc_bs = $9;
fp32_acc_value = $10;
fp32_acc_url = $11;
}
}
// INT8
if($5 == "INT8") {
// Performance
if($8 == "Performance") {
int8_perf_bs = $9;
int8_perf_value = $10;
int8_perf_url = $11;
}
// Accuracy
if($8 == "Accuracy") {
int8_acc_bs = $9;
int8_acc_value = $10;
int8_acc_url = $11;
}
}
}
}END {
printf("%s;%s;%s;%s;", int8_perf_bs,int8_perf_value,int8_acc_bs,int8_acc_value);
printf("%s;%s;%s;%s;", fp32_perf_bs,fp32_perf_value,fp32_acc_bs,fp32_acc_value);
printf("%s;%s;%s;%s;", int8_perf_url,int8_acc_url,fp32_perf_url,fp32_acc_url);
}
' "$1"
}
function generate_html_core {
echo "--- current values ---"
echo ${current_values}
echo "--- last values ---"
echo ${last_values}
tuning_strategy=$(grep "^${os};${platform};${framework};${fw_version};${model};" ${tuneLog} |awk -F';' '{print $6}')
tuning_time=$(grep "^${os};${platform};${framework};${fw_version};${model};" ${tuneLog} |awk -F';' '{print $7}')
tuning_count=$(grep "^${os};${platform};${framework};${fw_version};${model};" ${tuneLog} |awk -F';' '{print $8}')
tuning_log=$(grep "^${os};${platform};${framework};${fw_version};${model};" ${tuneLog} |awk -F';' '{print $9}')
echo "<tr><td rowspan=3>${platform}</td><td rowspan=3>${os}</td><td rowspan=3>${framework}</td><td rowspan=3>${fw_version}</td><td rowspan=3>${model}</td><td>New</td><td><a href=${tuning_log}>${tuning_strategy}</a></td>" >> ${output_dir}/report.html
echo "<td><a href=${tuning_log}>${tuning_time}</a></td><td><a href=${tuning_log}>${tuning_count}</a></td>" >> ${output_dir}/report.html
tuning_strategy=$(grep "^${os};${platform};${framework};${fw_version};${model};" ${tuneLogLast} |awk -F';' '{print $6}')
tuning_time=$(grep "^${os};${platform};${framework};${fw_version};${model};" ${tuneLogLast} |awk -F';' '{print $7}')
tuning_count=$(grep "^${os};${platform};${framework};${fw_version};${model};" ${tuneLogLast} |awk -F';' '{print $8}')
tuning_log=$(grep "^${os};${platform};${framework};${fw_version};${model};" ${tuneLogLast} |awk -F';' '{print $9}')
echo |awk -F ';' -v current_values="${current_values}" -v last_values="${last_values}" \
-v tuning_strategy="${tuning_strategy}" -v tuning_time="${tuning_time}" \
-v tuning_count="${tuning_count}" -v tuning_log="${tuning_log}" -F ';' '
function abs(x) { return x < 0 ? -x : x }
function show_new_last(batch, link, value, metric) {
if(value ~/[1-9]/) {
if (metric == "perf" || metric == "ratio") {
printf("<td>%s</td> <td><a href=%s>%.2f</a></td>\n",batch,link,value);
} else {
printf("<td>%s</td> <td><a href=%s>%.2f%</a></td>\n",batch,link,value*100);
}
} else {
if(link == "" || value == "N/A" || value == "unknown") {
printf("<td></td> <td></td>\n");
} else {
printf("<td>%s</td> <td><a href=%s>Failure</a></td>\n",batch,link);
}
}
}
function compare_current(int8_result, fp32_result, metric) {
if(int8_result ~/[1-9]/ && fp32_result ~/[1-9]/) {
if(metric == "acc") {
target = (int8_result - fp32_result) / fp32_result;
if(target >= -0.01) {
printf("<td rowspan=3 style=\"background-color:#90EE90\">%.2f %</td>", target*100);
}else if(target < -0.05) {
printf("<td rowspan=3 style=\"background-color:#FFD2D2\">%.2f %</td>", target*100);
job_status = "fail"
}else{
printf("<td rowspan=3>%.2f %</td>", target*100);
}
}else if(metric == "perf") {
target = int8_result / fp32_result;
if(target >= 1.5) {
printf("<td style=\"background-color:#90EE90\">%.2f</td>", target);
}else if(target < 1) {
printf("<td style=\"background-color:#FFD2D2\">%.2f</td>", target);
perf_status = "fail"
}else{
printf("<td>%.2f</td>", target);
}
}
else {
target = int8_result / fp32_result;
if(target >= 2) {
printf("<td rowspan=3 style=\"background-color:#90EE90\">%.2f</td>", target);
}else if(target < 1) {
printf("<td rowspan=3 style=\"background-color:#FFD2D2\">%.2f</td>", target);
job_status = "fail"
}else{
printf("<td rowspan=3>%.2f</td>", target);
}
}
}else {
printf("<td rowspan=3></td>");
}
}
function compare_result(new_result, previous_result, metric) {
if (new_result ~/[1-9]/ && previous_result ~/[1-9]/) {
if(metric == "acc") {
target = new_result - previous_result;
if(target > -0.00001 && target < 0.00001) {
status_png = "background-color:#90EE90";
} else {
status_png = "background-color:#FFD2D2";
job_status = "fail"
}
printf("<td style=\"%s\" colspan=2>%.2f %</td>", status_png, target*100);
} else {
target = new_result / previous_result;
if(target <= 1.084 && target >= 0.915) {
status_png = "background-color:#90EE90";
} else {
status_png = "background-color:#FFD2D2";
perf_status = "fail"
}
printf("<td style=\"%s\" colspan=2>%.2f</td>", status_png, target);
}
} else {
if((new_result == "nan" && previous_result == "nan") || new_result == "unknown"){
printf("<td class=\"col-cell col-cell3\" colspan=2></td>");
} else{
job_status = "fail"
status_png = "background-color:#FFD2D2";
printf("<td style=\"%s\" colspan=2></td>", status_png);
}
}
}
function compare_ratio(int8_perf_value, fp32_perf_value, last_int8_perf_value, last_fp32_perf_value) {
if (int8_perf_value ~/[1-9]/ && fp32_perf_value ~/[1-9]/ && last_int8_perf_value ~/[1-9]/ && last_fp32_perf_value ~/[1-9]/) {
new_result = int8_perf_value / fp32_perf_value
previous_result = last_int8_perf_value / last_fp32_perf_value
target = new_result / previous_result;
if (target <= 1.084 && target >= 0.915) {
status_png = "background-color:#90EE90";
} else {
status_png = "background-color:#FFD2D2";
ratio_status = "fail"
}
printf("<td style=\"%s\">%.2f</td>", status_png, target);
} else {
if (new_result == "nan" && previous_result == "nan") {
printf("<td class=\"col-cell col-cell3\"></td>");
} else {
if (new_result == "nan") {
ratio_status = "fail"
status_png = "background-color:#FFD2D2";
printf("<td style=\"%s\"></td>", status_png);
} else {
printf("<td class=\"col-cell col-cell3\"></td>");
}
}
}
}
BEGIN {
job_status = "pass"
perf_status = "pass"
ratio_status = "pass"
// issue list
jira_mobilenet = "https://jira01.devtools.intel.com/browse/PADDLEQ-384";
jira_resnext = "https://jira01.devtools.intel.com/browse/PADDLEQ-387";
jira_ssdmobilenet = "https://jira01.devtools.intel.com/browse/PADDLEQ-406";
}{
// Current values
split(current_values,current_value,";");
// Current
// INT8 Performance results
int8_perf_batch=current_value[1]
int8_perf_value=current_value[2]
int8_perf_url=current_value[9]
show_new_last(int8_perf_batch, int8_perf_url, int8_perf_value, "perf");
// INT8 Accuracy results
int8_acc_batch=current_value[3]
int8_acc_value=current_value[4]
int8_acc_url=current_value[10]
show_new_last(int8_acc_batch, int8_acc_url, int8_acc_value, "acc");
// FP32 Performance results
fp32_perf_batch=current_value[5]
fp32_perf_value=current_value[6]
fp32_perf_url=current_value[11]
show_new_last(fp32_perf_batch, fp32_perf_url, fp32_perf_value, "perf");
// FP32 Accuracy results
fp32_acc_batch=current_value[7]
fp32_acc_value=current_value[8]
fp32_acc_url=current_value[12]
show_new_last(fp32_acc_batch, fp32_acc_url, fp32_acc_value, "acc");
// Compare Current
compare_current(int8_perf_value, fp32_perf_value, "perf");
compare_current(int8_acc_value, fp32_acc_value, "acc");
// Last values
split(last_values,last_value,";");
// Last
printf("</tr>\n<tr><td>Last</td><td><a href=%4$s>%1$s</a></td><td><a href=%4$s>%2$s</a></td><td><a href=%4$s>%3$s</a></td>", tuning_strategy, tuning_time, tuning_count, tuning_log);
// Show last INT8 Performance results
last_int8_perf_batch=last_value[1]
last_int8_perf_value=last_value[2]
last_int8_perf_url=last_value[9]
show_new_last(last_int8_perf_batch, last_int8_perf_url, last_int8_perf_value, "perf");
// Show last INT8 Accuracy results
last_int8_acc_batch=last_value[3]
last_int8_acc_value=last_value[4]
last_int8_acc_url=last_value[10]
show_new_last(last_int8_acc_batch, last_int8_acc_url, last_int8_acc_value, "acc");
// Show last FP32 Performance results
last_fp32_perf_batch=last_value[5]
last_fp32_perf_value=last_value[6]
last_fp32_perf_url=last_value[11]
show_new_last(last_fp32_perf_batch, last_fp32_perf_url, last_fp32_perf_value, "perf");
// Show last FP32 Accuracy results
last_fp32_acc_batch=last_value[7]
last_fp32_acc_value=last_value[8]
last_fp32_acc_url=last_value[12]
show_new_last(last_fp32_acc_batch, last_fp32_acc_url, last_fp32_acc_value, "acc");
compare_current(last_int8_perf_value, last_fp32_perf_value, "perf");
printf("</tr>")
// current vs last
printf("</tr>\n<tr><td>New/Last</td><td colspan=3 class=\"col-cell3\"></td>");
// Compare INT8 Performance results
compare_result(int8_perf_value, last_int8_perf_value,"perf");
// Compare INT8 Accuracy results
compare_result(int8_acc_value, last_int8_acc_value, "acc");
// Compare FP32 Performance results
compare_result(fp32_perf_value, last_fp32_perf_value, "perf");
// Compare FP32 Accuracy results
compare_result(fp32_acc_value, last_fp32_acc_value, "acc");
// Compare INT8 FP32 Performance ratio
compare_ratio(int8_perf_value, fp32_perf_value, last_int8_perf_value, last_fp32_perf_value);
printf("</tr>\n");
status = (perf_status == "fail" && ratio_status == "fail") ? "fail" : "pass"
status = (job_status == "fail") ? "fail" : status
} END{
printf("\n%s", status);
}
' >> ${output_dir}/report.html
job_state=$(tail -1 ${WORKSPACE}/report.html)
sed -i '$s/.*//' ${WORKSPACE}/report.html
if [ ${job_state} == 'fail' ]; then
echo "====== perf_reg ======"
echo "##vso[task.setvariable variable=is_perf_reg]true"
fi
}
function generate_results {
echo "Generating tuning results"
oses=$(sed '1d' ${summaryLog} |cut -d';' -f1 | awk '!a[$0]++')
echo ${oses}
for os in ${oses[@]}
do
platforms=$(sed '1d' ${summaryLog} |grep "^${os}" |cut -d';' -f2 | awk '!a[$0]++')
echo ${platforms}
for platform in ${platforms[@]}
do
frameworks=$(sed '1d' ${summaryLog} |grep "^${os};${platform}" |cut -d';' -f3 | awk '!a[$0]++')
echo ${frameworks}
for framework in ${frameworks[@]}
do
fw_versions=$(sed '1d' ${summaryLog} |grep "^${os};${platform};${framework}" |cut -d';' -f4 | awk '!a[$0]++')
echo ${fw_versions}
for fw_version in ${fw_versions[@]}
do
models=$(sed '1d' ${summaryLog} |grep "^${os};${platform};${framework};${fw_version}" |cut -d';' -f6 | awk '!a[$0]++')
echo ${models}
for model in ${models[@]}
do
echo "--- processing model ---"
echo ${model}
current_values=$(generate_inference ${summaryLog})
echo "| current value |"
echo ${current_values}
last_values=$(generate_inference ${summaryLogLast})
echo "| last value |"
echo ${last_values}
generate_html_core ${current_values} ${last_values}
done
done
done
done
done
}
function generate_html_body {
MR_TITLE=''
Test_Info_Title=''
Test_Info=''
if [ "${qtools_branch}" == "" ];
then
commit_id=$(echo ${ghprbActualCommit} |awk '{print substr($1,1,7)}')
MR_TITLE="[ <a href='${repo_url}/pull/${ghprbPullId}'>PR-${ghprbPullId}</a> ]"
Test_Info_Title="<th colspan="2">Source Branch</th> <th colspan="4">Target Branch</th> <th colspan="4">Commit</th> "
Test_Info="<td colspan="2">${MR_source_branch}</td> <td colspan="4"><a href='${repo_url}/tree/${MR_target_branch}'>${MR_target_branch}</a></td> <td colspan="4"><a href='${MR_source_repo}/commit/${source_commit_id}'>${source_commit_id:0:6}</a></td>"
else
Test_Info_Title="<th colspan="4">Test Branch</th> <th colspan="4">Commit ID</th> "
Test_Info="<th colspan="4">${qtools_branch}</th> <th colspan="4">${qtools_commit}</th> "
fi
cat >> ${output_dir}/report.html << eof
<body>
<div id="main">
<h1 align="center">Neural Compressor Tuning Tests ${MR_TITLE}
[ <a
href="https://dev.azure.com/lpot-inc/neural-compressor/_build/results?buildId=${build_id}">Job-${build_id}</a>
]</h1>
<h1 align="center">Test Status: ${Jenkins_job_status}</h1>
<h2>Summary</h2>
<table class="features-table">
<tr>
<th>Repo</th>
${Test_Info_Title}
</tr>
<tr>
<td><a href="https://github.com/intel/neural-compressor">neural-compressor</a></td>
${Test_Info}
</tr>
</table>
eof
echo "Generating benchmarks table"
cat >> ${output_dir}/report.html << eof
<h2>Benchmark</h2>
<table class="features-table">
<tr>
<th rowspan="2">Platform</th>
<th rowspan="2">System</th>
<th rowspan="2">Framework</th>
<th rowspan="2">Version</th>
<th rowspan="2">Model</th>
<th rowspan="2">VS</th>
<th rowspan="2">Tuning<br>Strategy</th>
<th rowspan="2">Tuning<br>Time(s)</th>
<th rowspan="2">Tuning<br>Count</th>
<th colspan="4">INT8</th>
<th colspan="4">FP32</th>
<th colspan="2" class="col-cell col-cell1 col-cellh">Ratio</th>
</tr>
<tr>
<th>bs</th>
<th>imgs/s</th>
<th>bs</th>
<th>top1</th>
<th>bs</th>
<th>imgs/s</th>
<th>bs</th>
<th>top1</th>
<th class="col-cell col-cell1">Throughput<br><font size="2px">INT8/FP32</font></th>
<th class="col-cell col-cell1">Accuracy<br><font size="2px">(INT8-FP32)/FP32</font></th>
</tr>
eof
}
function generate_html_footer {
cat >> ${output_dir}/report.html << eof
<tr>
<td colspan="17"><font color="#d6776f">Note: </font>All data tested on Azure Cloud.</td>
<td colspan="2" class="col-cell col-cell1 col-cellf"></td>
</tr>
</table>
</div>
</body>
</html>
eof
}
function generate_html_head {
cat > ${output_dir}/report.html << eof
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html lang="en">
<head>
<meta http-equiv="content-type" content="text/html; charset=ISO-8859-1">
<title>Daily Tests - TensorFlow - Jenkins</title>
<style type="text/css">
body
{
margin: 0;
padding: 0;
background: white no-repeat left top;
}
#main
{
// width: 100%;
margin: 20px auto 10px auto;
background: white;
-moz-border-radius: 8px;
-webkit-border-radius: 8px;
padding: 0 30px 30px 30px;
border: 1px solid #adaa9f;
-moz-box-shadow: 0 2px 2px #9c9c9c;
-webkit-box-shadow: 0 2px 2px #9c9c9c;
}
.features-table
{
width: 100%;
margin: 0 auto;
border-collapse: separate;
border-spacing: 0;
text-shadow: 0 1px 0 #fff;
color: #2a2a2a;
background: #fafafa;
background-image: -moz-linear-gradient(top, #fff, #eaeaea, #fff); /* Firefox 3.6 */
background-image: -webkit-gradient(linear,center bottom,center top,from(#fff),color-stop(0.5, #eaeaea),to(#fff));
font-family: Verdana,Arial,Helvetica
}
.features-table th,td
{
text-align: center;
height: 25px;
line-height: 25px;
padding: 0 8px;
border: 1px solid #cdcdcd;
box-shadow: 0 1px 0 white;
-moz-box-shadow: 0 1px 0 white;
-webkit-box-shadow: 0 1px 0 white;
white-space: nowrap;
}
.no-border th
{
box-shadow: none;
-moz-box-shadow: none;
-webkit-box-shadow: none;
}
.col-cell
{
text-align: center;
width: 150px;
font: normal 1em Verdana, Arial, Helvetica;
}
.col-cell3
{
background: #efefef;
background: rgba(144,144,144,0.15);
}
.col-cell1, .col-cell2
{
background: #B0C4DE;
background: rgba(176,196,222,0.3);
}
.col-cellh
{
font: bold 1.3em 'trebuchet MS', 'Lucida Sans', Arial;
-moz-border-radius-topright: 10px;
-moz-border-radius-topleft: 10px;
border-top-right-radius: 10px;
border-top-left-radius: 10px;
border-top: 1px solid #eaeaea !important;
}
.col-cellf
{
font: bold 1.4em Georgia;
-moz-border-radius-bottomright: 10px;
-moz-border-radius-bottomleft: 10px;
border-bottom-right-radius: 10px;
border-bottom-left-radius: 10px;
border-bottom: 1px solid #dadada !important;
}
</style>
</head>
eof
}
main

View File

@@ -0,0 +1,123 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmarking: measure the model performance with the objective settings."""
import argparse
import subprocess
import numpy as np
parser = argparse.ArgumentParser(allow_abbrev=False)
parser.add_argument("--cores_per_instance", type=int, required=True)
parser.add_argument("--num_of_instance", type=int, required=True)
args = parser.parse_args()
def get_architecture():
"""Get the architecture name of the system."""
p1 = subprocess.Popen("lscpu", stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p2 = subprocess.Popen(["grep", "Architecture"], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(["cut", "-d", ":", "-f2"], stdin=p2.stdout, stdout=subprocess.PIPE)
res = None
for line in iter(p3.stdout.readline, b""):
res = line.decode("utf-8").strip()
return res
def get_threads_per_core():
"""Get the threads per core."""
p1 = subprocess.Popen("lscpu", stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p2 = subprocess.Popen(["grep", "Thread(s) per core"], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(["cut", "-d", ":", "-f2"], stdin=p2.stdout, stdout=subprocess.PIPE)
res = None
for line in iter(p3.stdout.readline, b""):
res = line.decode("utf-8").strip()
return res
def get_threads():
"""Get the list of threads."""
p1 = subprocess.Popen(["cat", "/proc/cpuinfo"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p2 = subprocess.Popen(["grep", "processor"], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(["cut", "-d", ":", "-f2"], stdin=p2.stdout, stdout=subprocess.PIPE)
res = []
for line in iter(p3.stdout.readline, b""):
res.append(line.decode("utf-8").strip())
return res
def get_physical_ids():
"""Get the list of sockets."""
p1 = subprocess.Popen(["cat", "/proc/cpuinfo"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p2 = subprocess.Popen(["grep", "physical id"], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(["cut", "-d", ":", "-f2"], stdin=p2.stdout, stdout=subprocess.PIPE)
res = []
for line in iter(p3.stdout.readline, b""):
res.append(line.decode("utf-8").strip())
return res
def get_core_ids():
"""Get the ids list of the cores."""
p1 = subprocess.Popen(["cat", "/proc/cpuinfo"], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
p2 = subprocess.Popen(["grep", "core id"], stdin=p1.stdout, stdout=subprocess.PIPE)
p3 = subprocess.Popen(["cut", "-d", ":", "-f2"], stdin=p2.stdout, stdout=subprocess.PIPE)
res = []
for line in iter(p3.stdout.readline, b""):
res.append(line.decode("utf-8").strip())
return res
def get_bounded_threads(core_ids, threads, sockets):
"""Return the threads id list that we will bind instances to."""
res = []
existing_socket_core_list = []
for idx, x in enumerate(core_ids):
socket_core = sockets[idx] + ":" + x
if socket_core not in existing_socket_core_list:
res.append(int(threads[idx]))
existing_socket_core_list.append(socket_core)
return res
def config_instance(cores_per_instance, num_of_instance):
"""Configure the multi-instance commands and trigger benchmark with sub process."""
core = []
if get_architecture() == "aarch64" and int(get_threads_per_core()) > 1:
raise OSError("Currently no support on AMD with hyperthreads")
else:
bounded_threads = get_bounded_threads(get_core_ids(), get_threads(), get_physical_ids())
for i in range(0, num_of_instance):
if get_architecture() == "x86_64":
core_list_idx = np.arange(0, cores_per_instance) + i * cores_per_instance
core_list = np.array(bounded_threads)[core_list_idx]
else:
core_list = np.arange(0, cores_per_instance) + i * cores_per_instance
core.append(core_list.tolist())
for i in range(len(core)):
core[i] = [str(j) for j in core[i]]
core[i] = ",".join(core[i])
core = ";".join(core)
return core
if __name__ == "__main__":
print(config_instance(args.cores_per_instance, args.num_of_instance))
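# Illustrative invocation (mirrors the multiInstance helper in run_benchmark_common.sh); it prints
# semicolon-separated core lists, one group per instance, e.g. "0,1,2,3;4,5,6,7":
#   python new_benchmark.py --cores_per_instance=4 --num_of_instance=2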

View File

@@ -0,0 +1,140 @@
#!/bin/bash
set -eo pipefail
source /neural-compressor/.azure-pipelines/scripts/change_color.sh
# get parameters
PATTERN='[-a-zA-Z0-9_]*='
SCRIPTS_PATH="/neural-compressor/.azure-pipelines/scripts/models"
for i in "$@"; do
case $i in
--framework=*)
framework=`echo $i | sed "s/${PATTERN}//"`;;
--model=*)
model=`echo $i | sed "s/${PATTERN}//"`;;
--input_model=*)
input_model=`echo $i | sed "s/${PATTERN}//"`;;
--benchmark_cmd=*)
benchmark_cmd=`echo $i | sed "s/${PATTERN}//"`;;
--log_dir=*)
log_dir=`echo $i | sed "s/${PATTERN}//"`;;
--new_benchmark=*)
new_benchmark=`echo $i | sed "s/${PATTERN}//"`;;
--precision=*)
precision=`echo $i | sed "s/${PATTERN}//"`;;
--stage=*)
stage=`echo $i | sed "s/${PATTERN}//"`;;
--USE_TUNE_ACC=*)
USE_TUNE_ACC=`echo $i | sed "s/${PATTERN}//"`;;
--PERF_STABLE_CHECK=*)
PERF_STABLE_CHECK=`echo $i | sed "s/${PATTERN}//"`;;
--BUILD_BUILDID=*)
BUILD_BUILDID=`echo $i | sed "s/${PATTERN}//"`;;
*)
echo "Parameter $i not recognized."; exit 1;;
esac
done
$BOLD_YELLOW && echo "-------- run_benchmark_common --------" && $RESET
main() {
# run accuracy
echo "USE_TUNE_ACC=${USE_TUNE_ACC}, PERF_STABLE_CHECK=${PERF_STABLE_CHECK}"
    # USE_TUNE_ACC==true means the accuracy result from the tuning log is reused, so the separate accuracy run is skipped
if [ ${USE_TUNE_ACC} == "false" ]; then
run_accuracy
fi
# run performance
if [ ${PERF_STABLE_CHECK} == "false" ]; then
run_performance
else
max_loop=3
gap=(0.05 0.05 0.1)
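        # Assumption: gap[i] is the allowed performance deviation handed to collect_log_model.py
        # via --gap on retry i, so the tolerance relaxes from 5% to 10% over the three attempts.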
for ((iter = 0; iter < ${max_loop}; iter++)); do
run_performance
{
check_perf_gap ${gap[${iter}]}
exit_code=$?
} || true
if [ ${exit_code} -ne 0 ]; then
$BOLD_RED && echo "FAILED with performance gap!!" && $RESET
else
$BOLD_GREEN && echo "SUCCEED!!" && $RESET
break
fi
done
exit ${exit_code}
fi
}
function check_perf_gap() {
python -u ${SCRIPTS_PATH}/collect_log_model.py \
--framework=${framework} \
--fwk_ver=${fwk_ver} \
--model=${model} \
--logs_dir="${log_dir}" \
--output_dir="${log_dir}" \
--build_id=${BUILD_BUILDID} \
--stage=${stage} \
--gap=$1
}
function run_performance() {
cmd="${benchmark_cmd} --input_model=${input_model}"
if [ "${new_benchmark}" == "true" ]; then
$BOLD_YELLOW && echo "run with internal benchmark..." && $RESET
export NUM_OF_INSTANCE=2
export CORES_PER_INSTANCE=4
eval ${cmd} 2>&1 | tee ${log_dir}/${framework}-${model}-performance-${precision}.log
else
$BOLD_YELLOW && echo "run with external multiInstance benchmark..." && $RESET
multiInstance
fi
}
function run_accuracy() {
$BOLD_YELLOW && echo "run tuning accuracy in precision ${precision}" && $RESET
eval "${benchmark_cmd} --input_model=${input_model} --mode=accuracy" 2>&1 | tee ${log_dir}/${framework}-${model}-accuracy-${precision}.log
}
function multiInstance() {
ncores_per_socket=${ncores_per_socket:=$(lscpu | grep 'Core(s) per socket' | cut -d: -f2 | xargs echo -n)}
$BOLD_YELLOW && echo "Executing multi instance benchmark" && $RESET
ncores_per_instance=4
$BOLD_YELLOW && echo "ncores_per_socket=${ncores_per_socket}, ncores_per_instance=${ncores_per_instance}" && $RESET
logFile="${log_dir}/${framework}-${model}-performance-${precision}"
benchmark_pids=()
core_list=$(python ${SCRIPTS_PATH}/new_benchmark.py --cores_per_instance=${ncores_per_instance} --num_of_instance=$(expr $ncores_per_socket / $ncores_per_instance))
core_list=($(echo $core_list | tr ';' ' '))
for ((j = 0; $j < $(expr $ncores_per_socket / $ncores_per_instance); j = $(($j + 1)))); do
$BOLD_GREEN && echo "OMP_NUM_THREADS=${ncores_per_instance} numactl --localalloc --physcpubind=${core_list[${j}]} ${cmd} 2>&1 | tee ${logFile}-${ncores_per_socket}-${ncores_per_instance}-${j}.log &" && $RESET
OMP_NUM_THREADS=${ncores_per_instance} numactl --localalloc --physcpubind=${core_list[${j}]} ${cmd} 2>&1 | tee ${logFile}-${ncores_per_socket}-${ncores_per_instance}-${j}.log &
benchmark_pids+=($!)
done
status="SUCCESS"
for pid in "${benchmark_pids[@]}"; do
wait $pid
exit_code=$?
$BOLD_YELLOW && echo "Detected exit code: ${exit_code}" && $RESET
if [ ${exit_code} == 0 ]; then
$BOLD_GREEN && echo "Process ${pid} succeeded" && $RESET
else
$BOLD_RED && echo "Process ${pid} failed" && $RESET
status="FAILURE"
fi
done
$BOLD_YELLOW && echo "Benchmark process status: ${status}" && $RESET
if [ ${status} == "FAILURE" ]; then
$BOLD_RED && echo "Benchmark process returned non-zero exit code." && $RESET
exit 1
fi
}
main
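A minimal invocation sketch for this script; the values below are illustrative (they are normally filled in by run_model_trigger_common.sh) and the BUILD_BUILDID is a made-up pipeline id:
bash run_benchmark_common.sh \
    --framework=tensorflow \
    --model=resnet50v1.5 \
    --input_model=/tf_dataset/pre-trained-models/resnet50v1_5/fp32/resnet50_v1.pb \
    --benchmark_cmd="bash run_benchmark.sh --dataset_location=/tf_dataset/dataset/TF_mini_imagenet --batch_size=1 --mode=performance" \
    --log_dir=/neural-compressor/.azure-pipelines/scripts/models/resnet50v1.5 \
    --new_benchmark=true \
    --precision=fp32 \
    --stage=fp32_benchmark \
    --USE_TUNE_ACC=false \
    --PERF_STABLE_CHECK=true \
    --BUILD_BUILDID=12345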

View File

@@ -0,0 +1,177 @@
#!/bin/bash
set -eo pipefail
source /neural-compressor/.azure-pipelines/scripts/change_color.sh
# get parameters
PATTERN='[-a-zA-Z0-9_]*='
for i in "$@"
do
case $i in
--yaml=*)
yaml=`echo $i | sed "s/${PATTERN}//"`;;
--framework=*)
framework=`echo $i | sed "s/${PATTERN}//"`;;
--fwk_ver=*)
fwk_ver=`echo $i | sed "s/${PATTERN}//"`;;
--torch_vision_ver=*)
torch_vision_ver=`echo $i | sed "s/${PATTERN}//"`;;
--model=*)
model=`echo $i | sed "s/${PATTERN}//"`;;
--model_src_dir=*)
model_src_dir=`echo $i | sed "s/${PATTERN}//"`;;
--dataset_location=*)
dataset_location=`echo $i | sed "s/${PATTERN}//"`;;
--input_model=*)
input_model=`echo $i | sed "s/${PATTERN}//"`;;
--batch_size=*)
batch_size=`echo $i | sed "s/${PATTERN}//"`;;
--strategy=*)
strategy=`echo $i | sed "s/${PATTERN}//"`;;
--new_benchmark=*)
new_benchmark=`echo $i | sed "s/${PATTERN}//"`;;
--inc_new_api=*)
inc_new_api=`echo $i | sed "s/${PATTERN}//"`;;
--tuning_cmd=*)
tuning_cmd=`echo $i | sed "s/${PATTERN}//"`;;
--benchmark_cmd=*)
benchmark_cmd=`echo $i | sed "s/${PATTERN}//"`;;
--mode=*)
mode=`echo $i | sed "s/${PATTERN}//"`;;
--USE_TUNE_ACC=*)
USE_TUNE_ACC=`echo $i | sed "s/${PATTERN}//"`;;
--PERF_STABLE_CHECK=*)
PERF_STABLE_CHECK=`echo $i | sed "s/${PATTERN}//"`;;
--BUILD_BUILDID=*)
BUILD_BUILDID=`echo $i | sed "s/${PATTERN}//"`;;
*)
echo "Parameter $i not recognized."; exit 1;;
esac
done
function check_results() {
local control_phrase=$1
if [ $(grep "${control_phrase}" ${log_dir}/${model}/${framework}-${model}-tune.log | wc -l) == 0 ];then
$BOLD_RED && echo "====== Quantization FAILED!! ======" && $RESET; exit 1
fi
}
log_dir="/neural-compressor/.azure-pipelines/scripts/models"
SCRIPTS_PATH="/neural-compressor/.azure-pipelines/scripts/models"
if [[ "${inc_new_api}" == "3x"* ]]; then
WORK_SOURCE_DIR="/neural-compressor/examples/3.x_api/${framework}"
else
WORK_SOURCE_DIR="/neural-compressor/examples/${framework}"
fi
$BOLD_YELLOW && echo "processing ${framework}-${fwk_ver}-${model}" && $RESET
if [ "${mode}" == "env_setup" ]; then
/bin/bash env_setup.sh \
--yaml=${yaml} \
--framework=${framework} \
--fwk_ver=${fwk_ver} \
--torch_vision_ver=${torch_vision_ver} \
--model=${model} \
--model_src_dir=${model_src_dir} \
--dataset_location=${dataset_location} \
--batch_size=${batch_size} \
--strategy=${strategy} \
--new_benchmark=${new_benchmark} \
--inc_new_api="${inc_new_api}"
elif [ "${mode}" == "tuning" ]; then
if [ "${framework}" == "onnxrt" ]; then
output_model=${log_dir}/${model}/${framework}-${model}-tune.onnx
elif [ "${framework}" == "tensorflow" ]; then
output_model=${log_dir}/${model}/${framework}-${model}-tune.pb
fi
[[ ${output_model} ]] && tuning_cmd="${tuning_cmd} --output_model=${output_model}"
cd ${WORK_SOURCE_DIR}/${model_src_dir}
    # For int4 models, add "--accuracy --load" so accuracy evaluation runs on the quantized model
if [[ "${model}" == *"int4"* ]]; then
sed -i "s|--quantize|--quantize --accuracy --load|g" run_quant.sh
fi
$BOLD_YELLOW && echo "workspace ${WORK_SOURCE_DIR}/${model_src_dir}" && $RESET
$BOLD_YELLOW && echo "tuning_cmd is === ${tuning_cmd}" && $RESET
$BOLD_YELLOW && echo "======== run tuning ========" && $RESET
/bin/bash ${SCRIPTS_PATH}/run_tuning_common.sh \
--tuning_cmd="${tuning_cmd}" \
--strategy=${strategy} \
2>&1 | tee -a ${log_dir}/${model}/${framework}-${model}-tune.log
$BOLD_YELLOW && echo "====== check tuning status. ======" && $RESET
if [[ "${inc_new_api}" == "3x"* ]]; then
control_phrase_1="Preparation end."
        check_results "$control_phrase_1"
control_phrase_2="Conversion end."
        check_results "$control_phrase_2"
else
control_phrase="model which meet accuracy goal."
        check_results "$control_phrase"
if [ $(grep "${control_phrase}" ${log_dir}/${model}/${framework}-${model}-tune.log | grep "Not found" | wc -l) == 1 ];then
$BOLD_RED && echo "====== Quantization FAILED!! ======" && $RESET; exit 1
fi
fi
$BOLD_GREEN && echo "====== Quantization SUCCEED!! ======" && $RESET
elif [ "${mode}" == "fp32_benchmark" ]; then
cd ${WORK_SOURCE_DIR}/${model_src_dir}
$BOLD_YELLOW && echo "workspace ${WORK_SOURCE_DIR}/${model_src_dir}" && $RESET
$BOLD_YELLOW && echo "benchmark_cmd is ${benchmark_cmd}" && $RESET
$BOLD_YELLOW && echo "====== run benchmark fp32 =======" && $RESET
/bin/bash ${SCRIPTS_PATH}/run_benchmark_common.sh \
--framework=${framework} \
--model=${model} \
--input_model=${input_model} \
--benchmark_cmd="${benchmark_cmd}" \
--log_dir="${log_dir}/${model}" \
--new_benchmark=${new_benchmark} \
--precision="fp32" \
--stage=${mode} \
--USE_TUNE_ACC=${USE_TUNE_ACC} \
--PERF_STABLE_CHECK=${PERF_STABLE_CHECK} \
--BUILD_BUILDID=${BUILD_BUILDID}
elif [ "${mode}" == "int8_benchmark" ]; then
cd ${WORK_SOURCE_DIR}/${model_src_dir}
$BOLD_YELLOW && echo "workspace ${WORK_SOURCE_DIR}/${model_src_dir}" && $RESET
$BOLD_YELLOW && echo "benchmark_cmd is ${benchmark_cmd}" && $RESET
$BOLD_YELLOW && echo "====== run benchmark int8 =======" && $RESET
if [[ "${framework}" == "onnxrt" ]]; then
model_name="${log_dir}/${model}/${framework}-${model}-tune.onnx"
elif [[ "${framework}" == "tensorflow" ]]; then
model_name="${log_dir}/${model}/${framework}-${model}-tune.pb"
elif [[ "${framework}" == "pytorch" ]]; then
model_name=${input_model}
benchmark_cmd="${benchmark_cmd} --int8=true"
fi
/bin/bash ${SCRIPTS_PATH}/run_benchmark_common.sh \
--framework=${framework} \
--model=${model} \
--input_model="${model_name}" \
--benchmark_cmd="${benchmark_cmd}" \
--log_dir="${log_dir}/${model}" \
--new_benchmark=${new_benchmark} \
--precision="int8" \
--stage=${mode} \
--USE_TUNE_ACC=${USE_TUNE_ACC} \
--PERF_STABLE_CHECK=${PERF_STABLE_CHECK} \
--BUILD_BUILDID=${BUILD_BUILDID}
elif [ "${mode}" == "collect_log" ]; then
cd ${WORK_SOURCE_DIR}/${model_src_dir}
$BOLD_YELLOW && echo "workspace ${WORK_SOURCE_DIR}/${model_src_dir}" && $RESET
$BOLD_YELLOW && echo "====== collect logs of model ${model} =======" && $RESET
if [ "${framework}" == "pytorch" ] && [ "${fwk_ver}" == "latest" ]; then
fwk_ver=$(python -c "import torch; print(torch.__version__)")
fi
python -u ${SCRIPTS_PATH}/collect_log_model.py \
--framework=${framework} \
--fwk_ver=${fwk_ver} \
--model=${model} \
--logs_dir="${log_dir}/${model}" \
--output_dir="${log_dir}/${model}" \
--build_id=${BUILD_BUILDID} \
--stage=${mode} \
--inc_new_api="${inc_new_api}"
$BOLD_YELLOW && echo "====== Finish collect logs =======" && $RESET
fi
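The per-framework trigger scripts that follow invoke this wrapper once per pipeline stage. A sketch of the full sequence, assuming the TensorFlow trigger below is saved as run_tensorflow_models_trigger.sh (a hypothetical file name; the stage names come from the mode checks above, and the build id is made up):
for stage in env_setup tuning fp32_benchmark int8_benchmark collect_log; do
    bash run_tensorflow_models_trigger.sh \
        --model=resnet50v1.5 \
        --mode=${stage} \
        --USE_TUNE_ACC=false \
        --PERF_STABLE_CHECK=true \
        --BUILD_BUILDID=12345
done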

View File

@@ -0,0 +1,62 @@
#!/bin/bash
set -eo pipefail
# get parameters
PATTERN='[-a-zA-Z0-9_]*='
for i in "$@"
do
case $i in
--model=*)
model=`echo $i | sed "s/${PATTERN}//"`;;
--mode=*)
mode=`echo $i | sed "s/${PATTERN}//"`;;
--USE_TUNE_ACC=*)
USE_TUNE_ACC=`echo $i | sed "s/${PATTERN}//"`;;
--PERF_STABLE_CHECK=*)
PERF_STABLE_CHECK=`echo $i | sed "s/${PATTERN}//"`;;
--BUILD_BUILDID=*)
BUILD_BUILDID=`echo $i | sed "s/${PATTERN}//"`;;
*)
echo "Parameter $i not recognized."; exit 1;;
esac
done
echo "specify FWs version..."
source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh 'latest'
FRAMEWORK="onnxrt"
FRAMEWORK_VERSION=${onnxruntime_version}
inc_new_api=false
# ======== set up config for onnxrt models ========
if [ "${model}" == "resnet50-v1-12" ]; then
model_src_dir="image_recognition/onnx_model_zoo/resnet50/quantization/ptq_static"
dataset_location="/tf_dataset2/datasets/imagenet/ImagenetRaw/ImagenetRaw_small_5000/ILSVRC2012_img_val"
input_model="/tf_dataset2/models/onnx/resnet50-v1-12/resnet50-v1-12.onnx"
yaml="resnet50_v1_5.yaml"
strategy="basic"
batch_size=1
new_benchmark=true
inc_new_api=true
tuning_cmd="bash run_quant.sh --input_model=${input_model} --dataset_location=${dataset_location}"
benchmark_cmd="bash run_benchmark.sh --config=${yaml} --mode=performance --dataset_location=${dataset_location}"
fi
/bin/bash run_model_trigger_common.sh \
--yaml=${yaml} \
--framework=${FRAMEWORK} \
--fwk_ver=${FRAMEWORK_VERSION} \
--model=${model} \
--model_src_dir=${model_src_dir} \
--dataset_location=${dataset_location} \
--input_model=${input_model} \
--batch_size=${batch_size} \
--strategy=${strategy} \
--new_benchmark=${new_benchmark} \
--tuning_cmd="${tuning_cmd}" \
--benchmark_cmd="${benchmark_cmd}" \
--inc_new_api="${inc_new_api}" \
--mode=${mode} \
--USE_TUNE_ACC=${USE_TUNE_ACC} \
--PERF_STABLE_CHECK=${PERF_STABLE_CHECK} \
--BUILD_BUILDID=${BUILD_BUILDID}

View File

@@ -0,0 +1,100 @@
#!/bin/bash
set -eo pipefail
# get parameters
PATTERN='[-a-zA-Z0-9_]*='
for i in "$@"
do
case $i in
--model=*)
model=`echo $i | sed "s/${PATTERN}//"`;;
--mode=*)
mode=`echo $i | sed "s/${PATTERN}//"`;;
--USE_TUNE_ACC=*)
USE_TUNE_ACC=`echo $i | sed "s/${PATTERN}//"`;;
--PERF_STABLE_CHECK=*)
PERF_STABLE_CHECK=`echo $i | sed "s/${PATTERN}//"`;;
--BUILD_BUILDID=*)
BUILD_BUILDID=`echo $i | sed "s/${PATTERN}//"`;;
*)
echo "Parameter $i not recognized."; exit 1;;
esac
done
dataset_location=""
input_model=""
yaml=""
strategy=""
batch_size=""
new_benchmark=true
inc_new_api=true
benchmark_cmd=""
# ======== set up config for pytorch models ========
if [ "${model}" == "resnet18" ]; then
model_src_dir="image_recognition/torchvision_models/quantization/ptq/cpu/eager"
dataset_location="/tf_dataset2/datasets/mini-imageraw"
input_model=""
yaml="conf.yaml"
strategy="bayesian"
batch_size=1
new_benchmark=false
inc_new_api=false
tuning_cmd="bash run_tuning.sh --topology=resnet18 --dataset_location=${dataset_location} --input_model=${input_model}"
benchmark_cmd="bash run_benchmark.sh --topology=resnet18 --dataset_location=${dataset_location} --mode=benchmark --batch_size=${batch_size} --iters=500"
elif [ "${model}" == "resnet18_fx" ]; then
model_src_dir="image_recognition/torchvision_models/quantization/ptq/cpu/fx/"
dataset_location="/tf_dataset2/datasets/mini-imageraw"
input_model="resnet18"
yaml=""
strategy="basic"
batch_size=1
new_benchmark=true
inc_new_api=true
tuning_cmd="bash run_quant.sh --topology=resnet18 --dataset_location=${dataset_location} --input_model=${input_model}"
benchmark_cmd="bash run_benchmark.sh --topology=resnet18 --dataset_location=${dataset_location} --mode=performance --batch_size=${batch_size} --iters=500"
elif [ "${model}" == "opt_125m_woq_gptq_int4" ]; then
model_src_dir="nlp/huggingface_models/language-modeling/quantization/weight_only"
inc_new_api=3x_pt
tuning_cmd="bash run_quant.sh --topology=opt_125m_woq_gptq_int4"
elif [ "${model}" == "opt_125m_woq_gptq_nf4_dq_bnb" ]; then
model_src_dir="nlp/huggingface_models/language-modeling/quantization/weight_only"
inc_new_api=3x_pt
tuning_cmd="bash run_quant.sh --topology=opt_125m_woq_gptq_nf4_dq_bnb"
elif [ "${model}" == "opt_125m_woq_gptq_int4_dq_ggml" ]; then
model_src_dir="nlp/huggingface_models/language-modeling/quantization/weight_only"
inc_new_api=3x_pt
tuning_cmd="bash run_quant.sh --topology=opt_125m_woq_gptq_int4_dq_ggml"
fi
echo "Specify FWs version..."
FRAMEWORK="pytorch"
source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh 'latest'
if [[ "${inc_new_api}" == "3x"* ]]; then
FRAMEWORK_VERSION="latest"
export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH
else
FRAMEWORK_VERSION=${pytorch_version}
TORCH_VISION_VERSION=${torchvision_version}
fi
/bin/bash run_model_trigger_common.sh \
--yaml=${yaml} \
--framework=${FRAMEWORK} \
--fwk_ver=${FRAMEWORK_VERSION} \
--torch_vision_ver=${TORCH_VISION_VERSION} \
--model=${model} \
--model_src_dir=${model_src_dir} \
--dataset_location=${dataset_location} \
--input_model=${input_model} \
--batch_size=${batch_size} \
--strategy=${strategy} \
--new_benchmark=${new_benchmark} \
--tuning_cmd="${tuning_cmd}" \
--benchmark_cmd="${benchmark_cmd}" \
--inc_new_api="${inc_new_api}" \
--mode=${mode} \
--USE_TUNE_ACC=${USE_TUNE_ACC} \
--PERF_STABLE_CHECK=${PERF_STABLE_CHECK} \
--BUILD_BUILDID=${BUILD_BUILDID}

View File

@@ -0,0 +1,118 @@
#!/bin/bash
set -eo pipefail
# get parameters
PATTERN='[-a-zA-Z0-9_]*='
for i in "$@"
do
case $i in
--model=*)
model=`echo $i | sed "s/${PATTERN}//"`;;
--mode=*)
mode=`echo $i | sed "s/${PATTERN}//"`;;
--USE_TUNE_ACC=*)
USE_TUNE_ACC=`echo $i | sed "s/${PATTERN}//"`;;
--PERF_STABLE_CHECK=*)
PERF_STABLE_CHECK=`echo $i | sed "s/${PATTERN}//"`;;
--BUILD_BUILDID=*)
BUILD_BUILDID=`echo $i | sed "s/${PATTERN}//"`;;
*)
echo "Parameter $i not recognized."; exit 1;;
esac
done
echo "specify FWs version..."
source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh 'latest'
FRAMEWORK="tensorflow"
FRAMEWORK_VERSION=${tensorflow_version}
inc_new_api=false
# ======== set up config for tensorflow models ========
if [ "${model}" == "resnet50v1.5" ]; then
model_src_dir="image_recognition/tensorflow_models/resnet50_v1_5/quantization/ptq"
dataset_location="/tf_dataset/dataset/TF_mini_imagenet"
input_model="/tf_dataset/pre-trained-models/resnet50v1_5/fp32/resnet50_v1.pb"
new_benchmark=true
inc_new_api=true
tuning_cmd="bash run_quant.sh --dataset_location=${dataset_location} --input_model=${input_model}"
benchmark_cmd="bash run_benchmark.sh --dataset_location=${dataset_location} --batch_size=1 --mode=performance"
elif [ "${model}" == "ssd_resnet50_v1" ];then
model_src_dir="object_detection/tensorflow_models/ssd_resnet50_v1/quantization/ptq"
dataset_location="/tf_dataset/tensorflow/mini-coco-100.record"
input_model="/tf_dataset/pre-train-model-oob/object_detection/ssd_resnet50_v1/frozen_inference_graph.pb"
new_benchmark=true
inc_new_api=true
tuning_cmd="bash run_quant.sh --dataset_location=${dataset_location} --input_model=${input_model}"
benchmark_cmd="bash run_benchmark.sh --dataset_location=${dataset_location} --batch_size=1 --mode=performance"
elif [ "${model}" == "ssd_mobilenet_v1_ckpt" ];then
model_src_dir="object_detection/tensorflow_models/ssd_mobilenet_v1/quantization/ptq"
dataset_location="/tf_dataset/tensorflow/mini-coco-100.record"
input_model="/tf_dataset/pre-train-model-oob/object_detection/ssd_mobilenet_v1"
new_benchmark=true
inc_new_api=true
tuning_cmd="bash run_quant.sh --dataset_location=${dataset_location} --input_model=${input_model}"
benchmark_cmd="bash run_benchmark.sh --dataset_location=${dataset_location} --batch_size=1 --mode=performance"
elif [ "${model}" == "inception_v1" ]; then
model_src_dir="image_recognition/tensorflow_models/quantization/ptq"
dataset_location="/tf_dataset/dataset/TF_mini_imagenet"
input_model="/tf_dataset/pre-train-model-slim/pbfile/frozen_pb/frozen_inception_v1.pb"
yaml="inception_v1.yaml"
strategy="basic"
batch_size=1
new_benchmark=true
tuning_cmd="bash run_tuning.sh --config=${yaml} --input_model=${input_model}"
benchmark_cmd="bash run_benchmark.sh --config=${yaml} --mode=performance"
elif [ "${model}" == "darknet19" ]; then
model_src_dir="oob_models/quantization/ptq"
dataset_location=""
input_model="/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/PublicInHouse/classification/darknet19/darknet19.pb"
yaml="config.yaml"
strategy="basic"
batch_size=1
new_benchmark=false
inc_new_api=true
tuning_cmd="bash run_quant.sh --topology=${model} --input_model=${input_model}"
benchmark_cmd="bash run_benchmark.sh --topology=${model} --mode=performance --batch_size=1 --iters=500"
elif [ "${model}" == "densenet-121" ]; then
model_src_dir="oob_models/quantization/ptq"
dataset_location=""
input_model="/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/densenet/121/tf/densenet-121.pb"
yaml="config.yaml"
strategy="basic"
batch_size=1
new_benchmark=false
inc_new_api=true
tuning_cmd="bash run_quant.sh --topology=${model} --input_model=${input_model}"
benchmark_cmd="bash run_benchmark.sh --topology=${model} --mode=performance --batch_size=1 --iters=500"
elif [ "${model}" == "resnet-101" ]; then
model_src_dir="oob_models/quantization/ptq"
dataset_location=""
input_model="/tf_dataset/tensorflow/tf_oob_models/ov/all_tf_models/classification/resnet/v1/101/tf/resnet-101.pb"
yaml="config.yaml"
strategy="basic"
batch_size=1
new_benchmark=false
inc_new_api=true
tuning_cmd="bash run_quant.sh --topology=${model} --input_model=${input_model}"
benchmark_cmd="bash run_benchmark.sh --topology=${model} --mode=performance --batch_size=1 --iters=500"
fi
/bin/bash run_model_trigger_common.sh \
--yaml=${yaml} \
--framework=${FRAMEWORK} \
--fwk_ver=${FRAMEWORK_VERSION} \
--model=${model} \
--model_src_dir=${model_src_dir} \
--dataset_location=${dataset_location} \
--input_model=${input_model} \
--batch_size=${batch_size} \
--strategy=${strategy} \
--new_benchmark=${new_benchmark} \
--tuning_cmd="${tuning_cmd}" \
--benchmark_cmd="${benchmark_cmd}" \
--inc_new_api="${inc_new_api}" \
--mode=${mode} \
--USE_TUNE_ACC=${USE_TUNE_ACC} \
--PERF_STABLE_CHECK=${PERF_STABLE_CHECK} \
--BUILD_BUILDID=${BUILD_BUILDID}

View File

@@ -0,0 +1,30 @@
#!/bin/bash
set -eo pipefail
source /neural-compressor/.azure-pipelines/scripts/change_color.sh
# get parameters
PATTERN='[-a-zA-Z0-9_]*='
starttime=`date +'%Y-%m-%d %H:%M:%S'`
for i in "$@"
do
case $i in
--tuning_cmd=*)
tuning_cmd=`echo $i | sed "s/${PATTERN}//"`;;
--strategy=*)
strategy=`echo $i | sed "s/${PATTERN}//"`;;
*)
echo "Parameter $i not recognized."; exit 1;;
esac
done
eval "/usr/bin/time -v ${tuning_cmd}"
$BOLD_YELLOW && echo "====== finish tuning. echo information. ======" && $RESET
endtime=`date +'%Y-%m-%d %H:%M:%S'`
start_seconds=$(date --date="$starttime" +%s);
end_seconds=$(date --date="$endtime" +%s);
$BOLD_GREEN && echo "Tuning time spend: "$((end_seconds-start_seconds))"s " && $RESET
$BOLD_GREEN && echo "Tuning strategy: ${strategy}" && $RESET
$BOLD_GREEN && echo "Total resident size (kbytes): $(cat /proc/meminfo | grep 'MemTotal' | sed 's/[^0-9]//g')" && $RESET

View File

@@ -0,0 +1,322 @@
import argparse
import os
import platform
import re
from typing import Optional, Union
import psutil
system = platform.system()
try:
import ruamel.yaml as yaml
except ImportError:
import ruamel_yaml as yaml
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--yaml", type=str, required=True, help="Path to yaml config.")
parser.add_argument("--framework", type=str, required=True, help="Framework of model.")
parser.add_argument("--dataset_location", type=str, required=True, help="Location of dataset used for model.")
parser.add_argument("--strategy", type=str, required=False, help="Strategy to update.")
parser.add_argument("--batch_size", type=int, required=False, help="Batch size.")
parser.add_argument("--new_benchmark", type=str, required=False, help="Whether to modify benchmark config.")
parser.add_argument("--multi_instance", type=str, required=False, help="Whether to eval in multi-instance.")
return parser.parse_args()
def update_yaml_dataset(yaml, framework, dataset_location):
if not os.path.isfile(yaml):
raise Exception(f"Not found yaml config at '{yaml}' location.")
print("Reading config")
with open(yaml, "r") as config:
lines = config.readlines()
# Update dataset
if framework != "pytorch":
val_txt_location = os.path.dirname(dataset_location) + f"{os.path.sep}" + "val.txt"
patterns = {
"root_path": {
"pattern": r"root:.*/path/to/(calibration|evaluation)/dataset/?",
"replacement": f"root: {dataset_location}",
},
"data_path": {
"pattern": r"data_path:.*/path/to/(calibration|evaluation)/dataset/?",
"replacement": f"data_path: {dataset_location}",
},
"image_list": {
"pattern": r"image_list:.*/path/to/(calibration|evaluation)/label/?",
"replacement": f"image_list: {val_txt_location}",
},
"data_dir": {
"pattern": r"data_dir:.*/path/to/dataset/?",
"replacement": f"data_dir: {dataset_location}",
},
}
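        # Illustrative effect (hypothetical dataset_location="/data/imagenet"):
        #   "root: /path/to/evaluation/dataset"     -> "root: /data/imagenet"
        #   "image_list: /path/to/evaluation/label" -> "image_list: /data/val.txt"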
print("======= update_yaml_dataset =======")
with open(yaml, "w") as config:
for line in lines:
for key, key_patterns in patterns.items():
if re.search(key_patterns["pattern"], line):
print(f"Replacing {key} key.")
line = re.sub(key_patterns["pattern"], key_patterns["replacement"], line)
config.write(line)
else:
val_dataset = dataset_location + f"{os.path.sep}" + "val"
train_dataset = dataset_location + f"{os.path.sep}" + "train"
patterns = {
"calibration_dataset": {
"pattern": r"root:.*/path/to/calibration/dataset/?",
"replacement": f"root: {train_dataset}",
},
"evaluation_dataset": {
"pattern": r"root:.*/path/to/evaluation/dataset/?",
"replacement": f"root: {val_dataset}",
},
}
print("======= update_yaml_dataset =======")
with open(yaml, "w") as config:
for line in lines:
for key, key_patterns in patterns.items():
if re.search(key_patterns["pattern"], line):
print(f"Replacing {key} key.")
line = re.sub(key_patterns["pattern"], key_patterns["replacement"], line)
config.write(line)
def update_yaml_config_tuning(
yaml_file,
strategy=None,
mode=None,
batch_size=None,
iteration=None,
max_trials=None,
algorithm=None,
timeout=None,
strategy_token=None,
sampling_size=None,
dtype=None,
tf_new_api=None,
):
with open(yaml_file) as f:
yaml_config = yaml.round_trip_load(f, preserve_quotes=True)
if algorithm:
try:
model_wise = yaml_config.get("quantization", {}).get("model_wise", {})
prev_activation = model_wise.get("activation", {})
if not prev_activation:
model_wise.update({"activation": {}})
prev_activation = model_wise.get("activation", {})
prev_activation.update({"algorithm": algorithm})
except Exception as e:
print(f"[ WARNING ] {e}")
if timeout:
try:
exit_policy = yaml_config.get("tuning", {}).get("exit_policy", {})
prev_timeout = exit_policy.get("timeout", None)
exit_policy.update({"timeout": timeout})
print(f"Changed {prev_timeout} to {timeout}")
except Exception as e:
print(f"[ WARNING ] {e}")
if strategy and strategy != "basic": # Workaround for PyTorch huggingface models (`sed` in run_quant.sh)
try:
tuning_config = yaml_config.get("tuning", {})
prev_strategy = tuning_config.get("strategy", {})
if not prev_strategy:
tuning_config.update({"strategy": {}})
prev_strategy = tuning_config.get("strategy", {})
strategy_name = prev_strategy.get("name", None)
prev_strategy.update({"name": strategy})
if strategy == "sigopt":
prev_strategy.update(
{
"sigopt_api_token": strategy_token,
"sigopt_project_id": "lpot",
"sigopt_experiment_name": "lpot-tune",
}
)
if strategy == "hawq":
prev_strategy.update({"loss": "CrossEntropyLoss"})
print(f"Changed {strategy_name} to {strategy}")
except Exception as e:
print(f"[ WARNING ] {e}")
if max_trials and max_trials > 0:
try:
tuning_config = yaml_config.get("tuning", {})
prev_exit_policy = tuning_config.get("exit_policy", {})
if not prev_exit_policy:
tuning_config.update({"exit_policy": {"max_trials": max_trials}})
else:
prev_max_trials = prev_exit_policy.get("max_trials", None)
prev_exit_policy.update({"max_trials": max_trials})
print(f"Changed {prev_max_trials} to {max_trials}")
except Exception as e:
print(f"[ WARNING ] {e}")
if mode == "accuracy":
try:
# delete performance part in yaml if exist
performance = yaml_config.get("evaluation", {}).get("performance", {})
if performance:
yaml_config.get("evaluation", {}).pop("performance", {})
# accuracy batch_size replace
if batch_size:
try:
dataloader = yaml_config.get("evaluation", {}).get("accuracy", {}).get("dataloader", {})
prev_batch_size = dataloader.get("batch_size", None)
dataloader.update({"batch_size": batch_size})
print(f"Changed accuracy batch size from {prev_batch_size} to {batch_size}")
except Exception as e:
print(f"[ WARNING ] {e}")
except Exception as e:
print(f"[ WARNING ] {e}")
elif mode:
try:
# delete accuracy part in yaml if exist
accuracy = yaml_config.get("evaluation", {}).get("accuracy", {})
if accuracy:
yaml_config.get("evaluation", {}).pop("accuracy", {})
# performance iteration replace
if iteration:
try:
performance = yaml_config.get("evaluation", {}).get("performance", {})
prev_iteration = performance.get("iteration", None)
performance.update({"iteration": iteration})
print(f"Changed performance batch size from {prev_iteration} to {iteration}")
except Exception as e:
print(f"[ WARNING ] {e}")
if batch_size and mode == "latency":
try:
dataloader = yaml_config.get("evaluation", {}).get("performance", {}).get("dataloader", {})
prev_batch_size = dataloader.get("batch_size", None)
dataloader.update({"batch_size": batch_size})
print(f"Changed accuracy batch size from {prev_batch_size} to {batch_size}")
except Exception as e:
print(f"[ WARNING ] {e}")
except Exception as e:
print(f"[ WARNING ] {e}")
if sampling_size:
try:
calibration = yaml_config.get("quantization", {}).get("calibration", {})
prev_sampling_size = calibration.get("sampling_size", None)
calibration.update({"sampling_size": sampling_size})
print(f"Changed calibration sampling size from {prev_sampling_size} to {sampling_size}")
except Exception as e:
print(f"[ WARNING ] {e}")
if dtype:
try:
quantization = yaml_config.get("quantization", {})
prev_dtype = quantization.get("dtype", None)
quantization.update({"dtype": dtype})
print(f"Changed dtype from {prev_dtype} to {dtype}")
except Exception as e:
print(f"[ WARNING ] {e}")
if tf_new_api == "true":
try:
model = yaml_config.get("model", {})
prev_framework = model.get("framework", None)
model.update({"framework": "inteltensorflow"})
print(f"Changed framework from {prev_framework} to inteltensorflow")
except Exception as e:
print(f"[ WARNING ] {e}")
print("====== update_yaml_config_tuning ========")
yaml_content = yaml.round_trip_dump(yaml_config)
with open(yaml_file, "w") as output_file:
output_file.write(yaml_content)
def update_yaml_config_benchmark_acc(yaml_path: str, batch_size=None):
with open(yaml_path) as f:
yaml_config = yaml.round_trip_load(f, preserve_quotes=True)
try:
accuracy = yaml_config.get("evaluation", {}).get("accuracy", {})
if not accuracy:
raise AttributeError
dataloader = accuracy.get("dataloader", {})
if dataloader:
dataloader.update({"batch_size": batch_size})
configs = accuracy.get("configs", {})
if configs:
del accuracy["configs"]
except Exception as e:
print(f"[ WARNING ] {e}")
print("====== update_yaml_config_benchmark_acc ========")
yaml_content = yaml.round_trip_dump(yaml_config)
with open(yaml_path, "w") as output_file:
output_file.write(yaml_content)
def update_yaml_config_benchmark_perf(yaml_path: str, batch_size=None, multi_instance=None):
# Get cpu information for multi-instance
total_cores = psutil.cpu_count(logical=False)
total_sockets = 1
ncores_per_socket = total_cores / total_sockets
ncores_per_instance = ncores_per_socket
iters = 100
if multi_instance == "true":
ncores_per_instance = 4
iters = 500
with open(yaml_path) as f:
yaml_config = yaml.round_trip_load(f, preserve_quotes=True)
try:
performance = yaml_config.get("evaluation", {}).get("performance", {})
if not performance:
raise AttributeError
dataloader = performance.get("dataloader", {})
if dataloader:
dataloader.update({"batch_size": batch_size})
performance.update({"iteration": iters})
configs = performance.get("configs", {})
if not configs:
raise AttributeError
else:
configs.update(
{
"cores_per_instance": int(ncores_per_instance),
"num_of_instance": int(ncores_per_socket // ncores_per_instance),
}
)
for attr in ["intra_num_of_threads", "inter_num_of_threads", "kmp_blocktime"]:
if configs.get(attr):
del configs[attr]
print(configs)
except Exception as e:
print(f"[ WARNING ] {e}")
print("====== update_yaml_config_benchmark_perf ========")
yaml_content = yaml.round_trip_dump(yaml_config)
with open(yaml_path, "w") as output_file:
output_file.write(yaml_content)
if __name__ == "__main__":
args = parse_args()
update_yaml_dataset(args.yaml, args.framework, args.dataset_location)
update_yaml_config_tuning(args.yaml, strategy=args.strategy)
print("===== multi_instance={} ====".format(args.multi_instance))
if args.new_benchmark == "true":
update_yaml_config_benchmark_acc(args.yaml, batch_size=args.batch_size)
update_yaml_config_benchmark_perf(args.yaml, batch_size=args.batch_size, multi_instance=args.multi_instance)
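A sketch of a typical invocation of this helper during the env_setup stage; the file name update_yaml_config.py is an assumption (it is not shown in this diff), and the concrete values are illustrative:
python update_yaml_config.py \
    --yaml=conf.yaml \
    --framework=tensorflow \
    --dataset_location=/tf_dataset/dataset/TF_mini_imagenet \
    --strategy=basic \
    --batch_size=1 \
    --new_benchmark=true \
    --multi_instance=true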

View File

@@ -0,0 +1,134 @@
source /neural-compressor/.azure-pipelines/scripts/change_color.sh
set -e
pip install coverage
export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/3x/coverage.${1}
coverage_log="/neural-compressor/log_dir/coverage_log"
coverage_log_base="/neural-compressor/log_dir/coverage_log_base"
coverage_compare="/neural-compressor/log_dir/coverage_compare.html"
cd /neural-compressor/log_dir
$BOLD_YELLOW && echo "collect coverage for PR branch" && $RESET
cp ut_3x_coverage/.coverage /neural-compressor/
mkdir -p coverage_PR
cd /neural-compressor
coverage report -m --rcfile=${COVERAGE_RCFILE} | tee ${coverage_log}
coverage html -d log_dir/coverage_PR/htmlcov --rcfile=${COVERAGE_RCFILE}
coverage xml -o log_dir/coverage_PR/coverage.xml --rcfile=${COVERAGE_RCFILE}
ls -l log_dir/coverage_PR/htmlcov
$BOLD_YELLOW && echo "collect coverage for baseline" && $RESET
cd /neural-compressor
cp -r /neural-compressor/.azure-pipelines .azure-pipelines-pr
git config --global --add safe.directory /neural-compressor
git fetch
git checkout master
rm -rf build dist *egg-info
binary_index="${1%_fp8}"
echo y | pip uninstall neural_compressor_${binary_index}
cd /neural-compressor/.azure-pipelines-pr/scripts && bash install_nc.sh ${1}
coverage erase
cd /neural-compressor/log_dir
mkdir -p coverage_base
rm -rf /neural-compressor/.coverage || true
cp ut_3x_baseline_coverage/.coverage /neural-compressor
cd /neural-compressor
coverage report -m --rcfile=${COVERAGE_RCFILE} | tee ${coverage_log_base}
coverage html -d log_dir/coverage_base/htmlcov --rcfile=${COVERAGE_RCFILE}
coverage xml -o log_dir/coverage_base/coverage.xml --rcfile=${COVERAGE_RCFILE}
ls -l log_dir/coverage_base/htmlcov
get_coverage_data() {
# Input argument
local coverage_xml="$1"
# Get coverage data
local coverage_data=$(python3 -c "import xml.etree.ElementTree as ET; root = ET.parse('$coverage_xml').getroot(); print(ET.tostring(root).decode())")
if [[ -z "$coverage_data" ]]; then
echo "Failed to get coverage data from $coverage_xml."
exit 1
fi
# Get lines coverage
local lines_covered=$(echo "$coverage_data" | grep -o 'lines-covered="[0-9]*"' | cut -d '"' -f 2)
local lines_valid=$(echo "$coverage_data" | grep -o 'lines-valid="[0-9]*"' | cut -d '"' -f 2)
if [ $lines_valid == 0 ]; then
local lines_coverage=0
else
local lines_coverage=$(awk "BEGIN {printf \"%.3f\", 100 * $lines_covered / $lines_valid}")
fi
# Get branches coverage
local branches_covered=$(echo "$coverage_data" | grep -o 'branches-covered="[0-9]*"' | cut -d '"' -f 2)
local branches_valid=$(echo "$coverage_data" | grep -o 'branches-valid="[0-9]*"' | cut -d '"' -f 2)
if [ $branches_valid == 0 ]; then
local branches_coverage=0
else
local branches_coverage=$(awk "BEGIN {printf \"%.3f\", 100 * $branches_covered/$branches_valid}")
fi
# Return values
echo "$lines_covered $lines_valid $lines_coverage $branches_covered $branches_valid $branches_coverage"
}
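# For reference, these fields live on the root <coverage> element of the Cobertura-style XML
# produced by `coverage xml`, e.g. (numbers illustrative):
#   <coverage lines-valid="1200" lines-covered="1100" branches-valid="400" branches-covered="350" ...>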
$BOLD_YELLOW && echo "compare coverage" && $RESET
coverage_PR_xml="log_dir/coverage_PR/coverage.xml"
coverage_PR_data=$(get_coverage_data $coverage_PR_xml)
read lines_PR_covered lines_PR_valid coverage_PR_lines_rate branches_PR_covered branches_PR_valid coverage_PR_branches_rate <<<"$coverage_PR_data"
coverage_base_xml="log_dir/coverage_base/coverage.xml"
coverage_base_data=$(get_coverage_data $coverage_base_xml)
read lines_base_covered lines_base_valid coverage_base_lines_rate branches_base_covered branches_base_valid coverage_base_branches_rate <<<"$coverage_base_data"
$BOLD_BLUE && echo "PR lines coverage: $lines_PR_covered/$lines_PR_valid ($coverage_PR_lines_rate%)" && $RESET
$BOLD_BLUE && echo "PR branches coverage: $branches_PR_covered/$branches_PR_valid ($coverage_PR_branches_rate%)" && $RESET
$BOLD_BLUE && echo "BASE lines coverage: $lines_base_covered/$lines_base_valid ($coverage_base_lines_rate%)" && $RESET
$BOLD_BLUE && echo "BASE branches coverage: $branches_base_covered/$branches_base_valid ($coverage_base_branches_rate%)" && $RESET
$BOLD_YELLOW && echo "clear upload path" && $RESET
rm -fr log_dir/coverage_PR/.coverage*
rm -fr log_dir/coverage_base/.coverage*
rm -fr log_dir/ut-coverage-*
# Declare an array to hold failed items
declare -a fail_items=()
if (( $(bc -l <<< "${coverage_PR_lines_rate}+0.05 < ${coverage_base_lines_rate}") )); then
fail_items+=("lines")
fi
if (( $(bc -l <<< "${coverage_PR_branches_rate}+0.05 < ${coverage_base_branches_rate}") )); then
fail_items+=("branches")
fi
if [[ ${#fail_items[@]} -ne 0 ]]; then
fail_items_str=$(
IFS=', '
echo "${fail_items[*]}"
)
for item in "${fail_items[@]}"; do
case "$item" in
lines)
decrease=$(echo $(printf "%.3f" $(echo "$coverage_PR_lines_rate - $coverage_base_lines_rate" | bc -l)))
;;
branches)
decrease=$(echo $(printf "%.3f" $(echo "$coverage_PR_branches_rate - $coverage_base_branches_rate" | bc -l)))
;;
*)
echo "Unknown item: $item"
continue
;;
esac
$BOLD_RED && echo "Unit Test failed with ${item} coverage decrease ${decrease}%" && $RESET
done
$BOLD_RED && echo "compare coverage to give detail info" && $RESET
bash /neural-compressor/.azure-pipelines-pr/scripts/ut/compare_coverage.sh ${coverage_compare} ${coverage_log} ${coverage_log_base} "FAILED" ${coverage_PR_lines_rate} ${coverage_base_lines_rate} ${coverage_PR_branches_rate} ${coverage_base_branches_rate}
exit 1
else
$BOLD_GREEN && echo "Unit Test success with coverage lines: ${coverage_PR_lines_rate}%, branches: ${coverage_PR_branches_rate}%" && $RESET
$BOLD_GREEN && echo "compare coverage to give detail info" && $RESET
bash /neural-compressor/.azure-pipelines-pr/scripts/ut/compare_coverage.sh ${coverage_compare} ${coverage_log} ${coverage_log_base} "SUCCESS" ${coverage_PR_lines_rate} ${coverage_base_lines_rate} ${coverage_PR_branches_rate} ${coverage_base_branches_rate}
fi

View File

@@ -0,0 +1,19 @@
[run]
branch = True
[report]
include =
*/neural_compressor/common/*
*/neural_compressor/torch/*
omit =
*/neural_compressor/torch/algorithms/fp8_quant/*
*/neural_compressor/torch/algorithms/mixed_low_precision/*
*/neural_compressor/torch/amp/*
exclude_lines =
pragma: no cover
raise NotImplementedError
raise TypeError
if self.device == "gpu":
if device == "gpu":
except ImportError:
except Exception as e:

View File

@@ -0,0 +1,15 @@
[run]
branch = True
[report]
include =
*/neural_compressor/torch/algorithms/fp8_quant/*
*/neural_compressor/torch/algorithms/mixed_low_precision/*
exclude_lines =
pragma: no cover
raise NotImplementedError
raise TypeError
if self.device == "gpu":
if device == "gpu":
except ImportError:
except Exception as e:

View File

@@ -0,0 +1,15 @@
[run]
branch = True
[report]
include =
*/neural_compressor/common/*
*/neural_compressor/tensorflow/*
exclude_lines =
pragma: no cover
raise NotImplementedError
raise TypeError
if self.device == "gpu":
if device == "gpu":
except ImportError:
except Exception as e:

View File

@@ -0,0 +1,47 @@
#!/bin/bash
python -c "import neural_compressor as nc"
test_case="run 3x Torch"
echo "${test_case}"
echo "##[section]Run import check"
set -e
python -c "import neural_compressor.torch"
python -c "import neural_compressor.common"
echo "##[section]import check pass"
# install requirements
echo "##[group]set up UT env..."
export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH
pip install -r /neural-compressor/test/3x/torch/requirements.txt
pip install pytest-cov
pip install pytest-html
echo "##[endgroup]"
pip list
export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/3x/coverage.3x_pt
inc_path=$(python -c 'import neural_compressor; print(neural_compressor.__path__[0])')
cd /neural-compressor/test/3x || exit 1
rm -rf tensorflow
rm -rf torch/algorithms/fp8_quant
rm -rf torch/quantization/fp8_quant
LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
ut_log_name=${LOG_DIR}/ut_3x_pt.log
find . -name "test*.py" | sed "s,\.\/,python -m pytest --cov=\"${inc_path}\" --cov-report term --html=report.html --self-contained-html --cov-report xml:coverage.xml --cov-append -vs --disable-warnings ,g" > run.sh
cat run.sh
bash run.sh 2>&1 | tee ${ut_log_name}
cp report.html ${LOG_DIR}/
if [ $(grep -c '== FAILURES ==' ${ut_log_name}) != 0 ] || [ $(grep -c '== ERRORS ==' ${ut_log_name}) != 0 ] || [ $(grep -c ' passed' ${ut_log_name}) == 0 ]; then
echo "Find errors in pytest case, please check the output..."
echo "Please search for '== FAILURES ==' or '== ERRORS =='"
exit 1
fi
# If UT passes, collect the coverage file into artifacts
cp .coverage ${LOG_DIR}/.coverage
echo "UT finished successfully! "

View File

@@ -0,0 +1,63 @@
#!/bin/bash
python -c "import neural_compressor as nc"
test_case="run 3x Torch Habana FP8"
echo "${test_case}"
echo "##[section]Run import check"
set -e
python -c "import neural_compressor.torch"
python -c "import neural_compressor.common"
echo "##[section]import check pass"
# install requirements
echo "##[group]set up UT env..."
export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH
sed -i '/^intel_extension_for_pytorch/d' /neural-compressor/test/3x/torch/requirements.txt
sed -i '/^auto_round/d' /neural-compressor/test/3x/torch/requirements.txt
cat /neural-compressor/test/3x/torch/requirements.txt
pip install -r /neural-compressor/test/3x/torch/requirements.txt
pip install pytest-cov
pip install pytest-html
pip install pytest-html-merger
echo "##[endgroup]"
pip list
export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/3x/coverage.3x_pt_fp8
inc_path=$(python -c 'import neural_compressor; print(neural_compressor.__path__[0])')
cd /neural-compressor/test/3x || exit 1
LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
ut_log_name=${LOG_DIR}/ut_3x_pt_fp8.log
pytest --cov="${inc_path}" -vs --disable-warnings --html=report_1.html --self-contained-html torch/quantization/weight_only/test_load.py 2>&1 | tee -a ${ut_log_name}
pytest --cov="${inc_path}" -vs --disable-warnings --html=report_2.html --self-contained-html torch/quantization/weight_only/test_rtn.py 2>&1 | tee -a ${ut_log_name}
# pytest --cov="${inc_path}" -vs --disable-warnings --html=report_3.html --self-contained-html torch/quantization/weight_only/test_autoround.py 2>&1 | tee -a ${ut_log_name}
# The folder below ships its own pytest configuration, so enter it and run its tests separately
cd /neural-compressor/test/3x/torch/algorithms/fp8_quant
pytest --cov="${inc_path}" -vs --disable-warnings --html=report_4.html --self-contained-html . 2>&1 | tee -a ${ut_log_name}
cp .coverage ${LOG_DIR}/.coverage.algo_fp8
cd - && mv /neural-compressor/test/3x/torch/algorithms/fp8_quant/*.html .
# The folder below ships its own pytest configuration, so enter it and run its tests separately
cd /neural-compressor/test/3x/torch/quantization/fp8_quant
pytest --cov="${inc_path}" -vs --disable-warnings --html=report_5.html --self-contained-html . 2>&1 | tee -a ${ut_log_name}
cp .coverage ${LOG_DIR}/.coverage.quant_fp8
cd - && mv /neural-compressor/test/3x/torch/quantization/fp8_quant/*.html .
mkdir -p report && mv *.html report
pytest_html_merger -i ./report -o ./report.html
cp report.html ${LOG_DIR}/
if [ $(grep -c '== FAILURES ==' ${ut_log_name}) != 0 ] || [ $(grep -c '== ERRORS ==' ${ut_log_name}) != 0 ] || [ $(grep -c ' passed' ${ut_log_name}) == 0 ]; then
echo "Find errors in pytest case, please check the output..."
echo "Please search for '== FAILURES ==' or '== ERRORS =='"
exit 1
fi
# If UT passes, collect the coverage file into artifacts
cp .coverage ${LOG_DIR}/.coverage
cd ${LOG_DIR}
coverage combine .coverage.*
echo "UT finished successfully! "

View File

@@ -0,0 +1,76 @@
#!/bin/bash
python -c "import neural_compressor as nc"
test_case="run 3x TensorFlow"
echo "${test_case}"
echo "##[section]Run import check"
set -e
python -c "import neural_compressor.tensorflow"
python -c "import neural_compressor.common"
echo "##[section]import check pass"
# install requirements
echo "##[group]set up UT env..."
pip install -r /neural-compressor/test/3x/tensorflow/requirements.txt
pip install pytest-cov
pip install pytest-html
pip install pytest-html-merger
echo "##[endgroup]"
pip list
export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/3x/coverage.3x_tf
inc_path=$(python -c 'import neural_compressor; print(neural_compressor.__path__[0])')
cd /neural-compressor/test/3x || exit 1
rm -rf torch
rm -rf onnxrt
mv tensorflow/keras ../3x_keras
mv tensorflow/quantization/ptq/newapi ../3x_newapi
LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
ut_log_name=${LOG_DIR}/ut_3x_tf.log
# test for tensorflow ut
pytest --cov="${inc_path}" -vs --disable-warnings --html=report_tf_quant.html --self-contained-html ./tensorflow/quantization 2>&1 | tee -a ${ut_log_name}
rm -rf tensorflow/quantization
pytest --cov="${inc_path}" --cov-append -vs --disable-warnings --html=report_tf_test_quantize_model.html --self-contained-html ./tensorflow/test_quantize_model.py 2>&1 | tee -a ${ut_log_name}
rm -rf tensorflow/test_quantize_model.py
pytest --cov="${inc_path}" --cov-append -vs --disable-warnings --html=report_tf.html --self-contained-html . 2>&1 | tee -a ${ut_log_name}
# test for tensorflow new api ut
pip uninstall tensorflow -y
pip install /tf_dataset/tf_binary/230928/tensorflow*.whl
pip install cmake
pip install protobuf==3.20.3
pip install horovod==0.27.0
pip list
rm -rf tensorflow/*
mkdir -p tensorflow/quantization/ptq
mv ../3x_newapi tensorflow/quantization/ptq/newapi
find . -name "test*.py" | sed "s,\.\/,python -m pytest --cov=${inc_path} --cov-append -vs --disable-warnings ,g" > run.sh
cat run.sh
bash run.sh 2>&1 | tee -a ${ut_log_name}
# test for itex ut
rm -rf tensorflow/*
mv ../3x_keras tensorflow/keras
pip uninstall tensorflow -y
pip install intel-extension-for-tensorflow[cpu]
pytest --cov="${inc_path}" --cov-append -vs --disable-warnings --html=report_keras.html --self-contained-html ./tensorflow 2>&1 | tee -a ${ut_log_name}
mkdir -p report
mv *.html report
pytest_html_merger -i ./report -o ./report.html
cp report.html ${LOG_DIR}/
if [ $(grep -c '== FAILURES ==' ${ut_log_name}) != 0 ] || [ $(grep -c '== ERRORS ==' ${ut_log_name}) != 0 ] || [ $(grep -c ' passed' ${ut_log_name}) == 0 ]; then
echo "Find errors in pytest case, please check the output..."
echo "Please search for '== FAILURES ==' or '== ERRORS =='"
exit 1
fi
# If UT passes, collect the coverage file into artifacts
cp .coverage ${LOG_DIR}/.coverage
echo "UT finished successfully! "

View File

@@ -0,0 +1,139 @@
source /neural-compressor/.azure-pipelines/scripts/change_color.sh
pip install coverage
export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/coverage.file
coverage_log="/neural-compressor/log_dir/coverage_log"
coverage_log_base="/neural-compressor/log_dir/coverage_log_base"
coverage_compare="/neural-compressor/log_dir/coverage_compare.html"
cd /neural-compressor/log_dir
$BOLD_YELLOW && echo "##[group]collect coverage for PR branch" && $RESET
mkdir -p coverage_PR
cp ut_*_coverage/.coverage.* ./coverage_PR/
cd coverage_PR
coverage combine --keep --rcfile=${COVERAGE_RCFILE}
cp .coverage /neural-compressor/.coverage
cd /neural-compressor
coverage report -m --rcfile=${COVERAGE_RCFILE} | tee ${coverage_log}
coverage html -d log_dir/coverage_PR/htmlcov --rcfile=${COVERAGE_RCFILE}
coverage xml -o log_dir/coverage_PR/coverage.xml --rcfile=${COVERAGE_RCFILE}
ls -l log_dir/coverage_PR/htmlcov
cd /neural-compressor
cp -r /neural-compressor/.azure-pipelines .azure-pipelines-pr
git config --global --add safe.directory /neural-compressor
git fetch
git checkout master
rm -rf build dist *egg-info
echo y | pip uninstall neural-compressor
cd /neural-compressor/.azure-pipelines-pr/scripts && bash install_nc.sh
echo "##[endgroup]"
$BOLD_YELLOW && echo "##[group]collect coverage for baseline" && $RESET
coverage erase
cd /neural-compressor/log_dir
mkdir -p coverage_base
cp ut-base_*_coverage/.coverage.* ./coverage_base/
cd coverage_base
coverage combine --keep --rcfile=${COVERAGE_RCFILE}
cp .coverage /neural-compressor/.coverage
cd /neural-compressor
coverage report -m --rcfile=${COVERAGE_RCFILE} | tee ${coverage_log_base}
coverage html -d log_dir/coverage_base/htmlcov --rcfile=${COVERAGE_RCFILE}
coverage xml -o log_dir/coverage_base/coverage.xml --rcfile=${COVERAGE_RCFILE}
ls -l log_dir/coverage_base/htmlcov
echo "##[endgroup]"
get_coverage_data() {
# Input argument
local coverage_xml="$1"
# Get coverage data
local coverage_data=$(python3 -c "import xml.etree.ElementTree as ET; root = ET.parse('$coverage_xml').getroot(); print(ET.tostring(root).decode())")
if [[ -z "$coverage_data" ]]; then
echo "Failed to get coverage data from $coverage_xml."
exit 1
fi
# Get lines coverage
local lines_covered=$(echo "$coverage_data" | grep -o 'lines-covered="[0-9]*"' | cut -d '"' -f 2)
local lines_valid=$(echo "$coverage_data" | grep -o 'lines-valid="[0-9]*"' | cut -d '"' -f 2)
if [ $lines_valid == 0 ]; then
local lines_coverage=0
else
local lines_coverage=$(awk "BEGIN {printf \"%.3f\", 100 * $lines_covered / $lines_valid}")
fi
# Get branches coverage
local branches_covered=$(echo "$coverage_data" | grep -o 'branches-covered="[0-9]*"' | cut -d '"' -f 2)
local branches_valid=$(echo "$coverage_data" | grep -o 'branches-valid="[0-9]*"' | cut -d '"' -f 2)
if [ $branches_valid == 0 ]; then
local branches_coverage=0
else
local branches_coverage=$(awk "BEGIN {printf \"%.3f\", 100 * $branches_covered/$branches_valid}")
fi
# Return values
echo "$lines_covered $lines_valid $lines_coverage $branches_covered $branches_valid $branches_coverage"
}
$BOLD_YELLOW && echo "compare coverage" && $RESET
coverage_PR_xml="log_dir/coverage_PR/coverage.xml"
coverage_PR_data=$(get_coverage_data $coverage_PR_xml)
read lines_PR_covered lines_PR_valid coverage_PR_lines_rate branches_PR_covered branches_PR_valid coverage_PR_branches_rate <<<"$coverage_PR_data"
coverage_base_xml="log_dir/coverage_base/coverage.xml"
coverage_base_data=$(get_coverage_data $coverage_base_xml)
read lines_base_covered lines_base_valid coverage_base_lines_rate branches_base_covered branches_base_valid coverage_base_branches_rate <<<"$coverage_base_data"
$BOLD_BLUE && echo "PR lines coverage: $lines_PR_covered/$lines_PR_valid ($coverage_PR_lines_rate%)" && $RESET
$BOLD_BLUE && echo "PR branches coverage: $branches_PR_covered/$branches_PR_valid ($coverage_PR_branches_rate%)" && $RESET
$BOLD_BLUE && echo "BASE lines coverage: $lines_base_covered/$lines_base_valid ($coverage_base_lines_rate%)" && $RESET
$BOLD_BLUE && echo "BASE branches coverage: $branches_base_covered/$branches_base_valid ($coverage_base_branches_rate%)" && $RESET
$BOLD_YELLOW && echo "clear upload path" && $RESET
rm -fr log_dir/coverage_PR/.coverage*
rm -fr log_dir/coverage_base/.coverage*
rm -fr log_dir/ut-coverage-*
# Declare an array to hold failed items
declare -a fail_items=()
if (( $(bc -l <<< "${coverage_PR_lines_rate}+0.05 < ${coverage_base_lines_rate}") )); then
fail_items+=("lines")
fi
if (( $(bc -l <<< "${coverage_PR_branches_rate}+0.05 < ${coverage_base_branches_rate}") )); then
fail_items+=("branches")
fi
if [[ ${#fail_items[@]} -ne 0 ]]; then
fail_items_str=$(
IFS=', '
echo "${fail_items[*]}"
)
for item in "${fail_items[@]}"; do
case "$item" in
lines)
decrease=$(echo $(printf "%.3f" $(echo "$coverage_PR_lines_rate - $coverage_base_lines_rate" | bc -l)))
;;
branches)
decrease=$(echo $(printf "%.3f" $(echo "$coverage_PR_branches_rate - $coverage_base_branches_rate" | bc -l)))
;;
*)
echo "Unknown item: $item"
continue
;;
esac
$BOLD_RED && echo "Unit Test failed with ${item} coverage decrease ${decrease}%" && $RESET
done
$BOLD_RED && echo "compare coverage to give detail info" && $RESET
bash /neural-compressor/.azure-pipelines-pr/scripts/ut/compare_coverage.sh ${coverage_compare} ${coverage_log} ${coverage_log_base} "FAILED" ${coverage_PR_lines_rate} ${coverage_base_lines_rate} ${coverage_PR_branches_rate} ${coverage_base_branches_rate}
exit 1
else
$BOLD_GREEN && echo "Unit Test success with coverage lines: ${coverage_PR_lines_rate}%, branches: ${coverage_PR_branches_rate}%" && $RESET
$BOLD_GREEN && echo "compare coverage to give detail info" && $RESET
bash /neural-compressor/.azure-pipelines-pr/scripts/ut/compare_coverage.sh ${coverage_compare} ${coverage_log} ${coverage_log_base} "SUCCESS" ${coverage_PR_lines_rate} ${coverage_base_lines_rate} ${coverage_PR_branches_rate} ${coverage_base_branches_rate}
fi

View File

@@ -0,0 +1,225 @@
output_file=$1
coverage_pr_log=$2
coverage_base_log=$3
coverage_status=$4
coverage_PR_lines_rate=$5
coverage_base_lines_rate=$6
coverage_PR_branches_rate=$7
coverage_base_branches_rate=$8
module_name="neural_compressor"
[[ ! -f $coverage_pr_log ]] && exit 1
[[ ! -f $coverage_base_log ]] && exit 1
file_name="./coverage_compare"
sed -i "s|\/usr.*${module_name}\/||g" $coverage_pr_log
sed -i "s|\/usr.*${module_name}\/||g" $coverage_base_log
diff $coverage_pr_log $coverage_base_log >diff_file
[[ $? == 0 ]] && exit 0
grep -Po "[<,>,\d].*" diff_file | awk '{print $1 "\t" $2 "\t" $3 "\t" $4 "\t" $5 "\t" $6 "\t" $7}' | sed "/Name/d" | sed "/TOTAL/d" | sed "/---/d" >$file_name
[[ ! -s $file_name ]] && exit 0
[[ -f $output_file ]] && rm -f $output_file
touch $output_file
function generate_html_head {
cat >${output_file} <<eof
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>UT coverage</title>
<style type="text/css">
body {
margin: 0;
padding: 0;
background: white no-repeat left top;
}
.main {
margin: 20px auto 10px auto;
background: white;
border-radius: 8px;
-moz-border-radius: 8px;
-webkit-border-radius: 8px;
padding: 0 30px 30px 30px;
border: 1px solid #adaa9f;
box-shadow: 0 2px 2px #9c9c9c;
-moz-box-shadow: 0 2px 2px #9c9c9c;
-webkit-box-shadow: 0 2px 2px #9c9c9c;
}
.features-table {
width: 100%;
margin: 0 auto;
border-collapse: separate;
border-spacing: 0;
text-shadow: 0 1px 0 #fff;
color: #2a2a2a;
background: #fafafa;
background-image: -moz-linear-gradient(top, #fff, #eaeaea, #fff);
/* Firefox 3.6 */
background-image: -webkit-gradient(linear, center bottom, center top, from(#fff), color-stop(0.5, #eaeaea), to(#fff));
font-family: Verdana, Arial, Helvetica
}
.features-table th,
td {
text-align: center;
height: 25px;
line-height: 25px;
padding: 0 8px;
border: 1px solid #cdcdcd;
box-shadow: 0 1px 0 white;
-moz-box-shadow: 0 1px 0 white;
-webkit-box-shadow: 0 1px 0 white;
white-space: nowrap;
}
</style>
</head>
eof
}
function extract_diff_data() {
local file_name=$1 diff_file=$2 reg=$3
local file=$(cat $file_name | grep "${diff_file}" | grep -v ".*/${diff_file}" | grep -Po "${reg}.*" | sed "s/${reg}[ \t]*//g" | awk '{print $1}')
local stmts=$(cat $file_name | grep "${diff_file}" | grep -v ".*/${diff_file}" | grep -Po "${reg}.*" | sed "s/${reg}[ \t]*//g" | awk '{print $2}')
local miss=$(cat $file_name | grep "${diff_file}" | grep -v ".*/${diff_file}" | grep -Po "${reg}.*" | sed "s/${reg}[ \t]*//g" | awk '{print $3}')
local cover=$(cat $file_name | grep "${diff_file}" | grep -v ".*/${diff_file}" | grep -Po "${reg}.*" | sed "s/${reg}[ \t]*//g" | awk '{print $6}')
local branch=$(cat $file_name | grep "${diff_file}" | grep -v ".*/${diff_file}" | grep -Po "${reg}.*" | sed "s/${reg}[ \t]*//g" | awk '{print $4}')
echo "$file $stmts $miss $cover $branch"
}
function write_compare_details() {
local file=$1 stmts1=$2 miss1=$3 branch1=$4 cover1=$5 stmts2=$6 miss2=$7 branch2=$8 cover2=$9
echo """
<tr>
<td>PR | BASE</td>
<td style=\"text-align:left\">${file}</td>
<td style=\"text-align:left\">${stmts1} | ${stmts2}</td>
<td style=\"text-align:left\">${miss1} | ${miss2}</td>
<td style=\"text-align:left\">${branch1} | ${branch2}</td>
<td style=\"text-align:left\">${cover1} | ${cover2}</td>
</tr>
""" >>${output_file}
}
function get_color() {
local decrease=$1
if (($(echo "$decrease < 0" | bc -l))); then
local color="#FFD2D2"
else
local color="#90EE90"
fi
echo "$color"
}
function generate_coverage_summary() {
# generate table head
local Lines_cover_decrease=$(echo $(printf "%.3f" $(echo "$coverage_PR_lines_rate - $coverage_base_lines_rate" | bc -l)))
local Branches_cover_decrease=$(echo $(printf "%.3f" $(echo "$coverage_PR_branches_rate - $coverage_base_branches_rate" | bc -l)))
read lines_coverage_color <<<"$(get_color ${Lines_cover_decrease})"
read branches_coverage_color <<<"$(get_color ${Branches_cover_decrease})"
echo """
<body>
<div class="main">
<h1 align="center">Coverage Summary : ${coverage_status}</h1>
<table class=\"features-table\" style=\"width: 60%;margin-left:auto;margin-right:auto;empty-cells: hide\">
<tr>
<th></th>
<th>Base coverage</th>
<th>PR coverage</th>
<th>Diff</th>
</tr>
<tr>
<td> Lines </td>
<td> ${coverage_base_lines_rate}% </td>
<td> ${coverage_PR_lines_rate}% </td>
<td style=\"background-color:${lines_coverage_color}\"> ${Lines_cover_decrease}% </td>
</tr>
<tr>
<td> Branches </td>
<td> ${coverage_base_branches_rate}% </td>
<td> ${coverage_PR_branches_rate}% </td>
<td style=\"background-color:${branches_coverage_color}\"> ${Branches_cover_decrease}% </td>
</tr>
</table>
</div>
""" >>${output_file}
}
function generate_coverage_details() {
echo """
<div class="main">
<h2 align="center">Coverage Detail</h2>
<table class=\"features-table\" style=\"width: 60%;margin-left:auto;margin-right:auto;empty-cells: hide\">
<tr>
<th>Commit</th>
<th>FileName</th>
<th>Stmts</th>
<th>Miss</th>
<th>Branch</th>
<th>Cover</th>
</tr>
""" >>${output_file}
# generate compare detail
cat ${file_name} | while read line; do
if [[ $(echo $line | grep "[0-9]a[0-9]") ]] && [[ $(grep -A 1 "$line" ${file_name} | grep ">") ]]; then
diff_lines=$(sed -n "/${line}/,/^[0-9]/p" ${file_name} | grep ">")
diff_file_name=$(sed -n "/${line}/,/^[0-9]/p" ${file_name} | grep -Po ">.*[a-z,A-Z].*.py" | sed "s|>||g")
for diff_file in ${diff_file_name}; do
diff_file=$(echo "${diff_file}" | sed 's/[ \t]*//g')
diff_coverage_data=$(extract_diff_data ${file_name} ${diff_file} ">")
read file stmts miss cover branch <<<"$diff_coverage_data"
write_compare_details $file "NA" "NA" "NA" "NA" $stmts $miss $branch $cover
done
elif [[ $(echo $line | grep "[0-9]c[0-9]") ]] && [[ $(cat ${file_name} | grep -A 1 "$line" | grep "<") ]]; then
diff_lines=$(sed -n "/${line}/,/^[0-9]/p" ${file_name} | grep "<")
diff_file_name=$(sed -n "/${line}/,/^[0-9]/p" ${file_name} | grep -Po "<.*[a-z,A-Z].*.py" | sed "s|<||g")
for diff_file in ${diff_file_name}; do
diff_file=$(echo "${diff_file}" | sed 's/[ \t]*//g')
diff_coverage_data1=$(extract_diff_data ${file_name} ${diff_file} "<")
read file1 stmts1 miss1 cover1 branch1 <<<"$diff_coverage_data1"
diff_coverage_data2=$(extract_diff_data ${file_name} ${diff_file} ">")
read file2 stmts2 miss2 cover2 branch2 <<<"$diff_coverage_data2"
write_compare_details $file1 $stmts1 $miss1 $branch1 $cover1 $stmts2 $miss2 $branch2 $cover2
done
elif [[ $(echo $line | grep "[0-9]d[0-9]") ]] && [[ $(cat ${file_name} | grep -A 1 "$line" | grep "<") ]]; then
diff_lines=$(sed -n "/${line}/,/^[0-9]/p" ${file_name} | grep "<")
diff_file_name=$(sed -n "/${line}/,/^[0-9]/p" ${file_name} | grep -Po "<.*[a-z,A-Z].*.py" | sed "s|<||g")
for diff_file in ${diff_file_name}; do
diff_file=$(echo "${diff_file}" | sed 's/[ \t]*//g')
diff_coverage_data=$(extract_diff_data ${file_name} ${diff_file} "<")
read file stmts miss cover branch <<<"$diff_coverage_data"
write_compare_details $file $stmts $miss $branch $cover "NA" "NA" "NA" "NA"
done
fi
done
# generate table end
echo """
</table>
</div>
</body>
</html>""" >>${output_file}
}
function main {
generate_html_head
generate_coverage_summary
if [[ ${coverage_status} = "SUCCESS" ]]; then
echo """</body></html>""" >>${output_file}
echo "coverage PASS, no need to compare difference"
exit 0
else
generate_coverage_details
fi
}
main

View File

@@ -0,0 +1,30 @@
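# Coverage settings for the basic unit-test runs: branch coverage enabled; 3.x packages and known-unreachable lines are omitted.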
[run]
branch = True
[report]
omit =
*/**/fake*yaml
*/**/fake.py
*/neural_compressor/model/nets_factory.py
*/neural_compressor/benchmark.py
*/neural_compressor/experimental/benchmark.py
*/neural_compressor/contrib/strategy/tpe.py
*/intel_extension_for_transformers/backends/*
*/intel_extension_for_transformers/optimization/utils/get_throughput.py
*/neural_compressor/adaptor/tf_utils/graph_rewriter/generic/fuse_decomposed_in.py
*/neural_compressor/adaptor/tf_utils/quantize_graph/qdq/fuse_qdq_in.py
*/neural_compressor/adaptor/tf_utils/graph_rewriter/int8/freeze_value.py
*/neural_compressor/template/*
*/neural_compressor/common/*
*/neural_compressor/torch/*
*/neural_compressor/tensorflow/*
exclude_lines =
pragma: no cover
raise NotImplementedError
raise TypeError
if self.device == "gpu":
if device == "gpu":
except ImportError:
except Exception as e:
onnx_version < ONNX18_VERSION
onnx_version >= ONNX18_VERSION

View File

@@ -0,0 +1,116 @@
#!/bin/bash
set -x
echo "copy pre-train model..."
mkdir -p /tmp/.neural_compressor/inc_ut || true
cp -r /tf_dataset/ut-localfile/resnet_v2 /tmp/.neural_compressor/inc_ut || true
mkdir -p ~/.keras/datasets || true
cp -r /tf_dataset/ut-localfile/cifar-10-batches-py* ~/.keras/datasets || true
ls -l ~/.keras/datasets
echo "install dependencies..."
echo "tensorflow version is $tensorflow_version"
echo "itex version is $itex_version"
echo "pytorch version is $pytorch_version"
echo "torchvision version is $torchvision_version"
echo "ipex version is $ipex_version"
echo "onnx version is $onnx_version"
echo "onnxruntime version is $onnxruntime_version"
echo "mxnet version is $mxnet_version"
test_case=$1
echo -e "##[group]test case is ${test_case}"
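# Install only the frameworks whose version variables are set; an empty variable skips that framework.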
if [[ "${tensorflow_version}" == *"-official" ]]; then
pip install tensorflow==${tensorflow_version%-official}
elif [[ "${tensorflow_version}" == "spr-base" ]]; then
pip install /tf_dataset/tf_binary/230928/tensorflow*.whl
pip install cmake
pip install protobuf==3.20.3
pip install horovod==0.27.0
if [[ $? -ne 0 ]]; then
exit 1
fi
elif [[ "${tensorflow_version}" != "" ]]; then
pip install intel-tensorflow==${tensorflow_version}
fi
if [[ "${itex_version}" != "" ]]; then
pip install --upgrade intel-extension-for-tensorflow[cpu]==${itex_version}
pip install tf2onnx
fi
if [[ "${pytorch_version}" != "" ]]; then
pip install torch==${pytorch_version} -f https://download.pytorch.org/whl/torch_stable.html
fi
if [[ "${torchvision_version}" != "" ]]; then
pip install torchvision==${torchvision_version} -f https://download.pytorch.org/whl/torch_stable.html
fi
if [[ "${ipex_version}" != "" ]]; then
pip install intel-extension-for-pytorch=="${ipex_version%+cpu}"
fi
if [[ "${onnx_version}" != "" ]]; then
pip install onnx==${onnx_version}
fi
if [[ "${onnxruntime_version}" != "" ]]; then
pip install onnxruntime==${onnxruntime_version}
if [[ "${onnxruntime_version}" == "1.14"* ]]; then
pip install onnxruntime-extensions==0.8.0
else
pip install onnxruntime-extensions
fi
pip install optimum
fi
if [ "${mxnet_version}" != '' ]; then
pip install numpy==1.23.5
echo "re-install pycocotools to resolve the numpy compatibility issue..."
pip uninstall pycocotools -y
pip install --no-cache-dir pycocotools
pip install mxnet==${mxnet_version}
fi
# install special test env requirements
# common deps
pip install cmake
pip install transformers
if [[ $(echo "${test_case}" | grep -c "others") != 0 ]];then
pip install tf_slim xgboost accelerate==0.21.0 peft
elif [[ $(echo "${test_case}" | grep -c "nas") != 0 ]]; then
pip install dynast==1.6.0rc1
elif [[ $(echo "${test_case}" | grep -c "tf pruning") != 0 ]]; then
pip install tensorflow-addons
# Workaround
# horovod can't be install in the env with TF and PT together
# so test distribute cases in the env with single fw installed
pip install horovod
fi
if [[ $(echo "${test_case}" | grep -c "api") != 0 ]] || [[ $(echo "${test_case}" | grep -c "adaptor") != 0 ]]; then
pip install auto-round
fi
# test deps
pip install coverage
pip install pytest
pip install pytest-html
echo "##[endgroup]"
pip list
echo "[DEBUG] list pipdeptree..."
pip install pipdeptree
pipdeptree
# import torch before import tensorflow
if [[ $(echo "${test_case}" | grep -c "run basic api") != 0 ]] || [[ $(echo "${test_case}" | grep -c "run basic others") != 0 ]] || [[ $(echo "${test_case}" | grep -c "run basic adaptor") != 0 ]]; then
cd /neural-compressor/test || exit 1
find . -name "test*.py" | xargs sed -i 's/import tensorflow as tf/import torch; import tensorflow as tf/g'
find . -name "test*.py" | xargs sed -i 's/import tensorflow.compat.v1 as tf/import torch; import tensorflow.compat.v1 as tf/g'
find . -name "test*.py" | xargs sed -i 's/from tensorflow import keras/import torch; from tensorflow import keras/g'
fi

View File

@@ -0,0 +1,35 @@
#!/bin/bash
python -c "import neural_compressor as nc;print(nc.version.__version__)"
test_case="run basic adaptor"
echo "${test_case}"
echo "specify fwk version..."
source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh $1
echo "set up UT env..."
bash /neural-compressor/.azure-pipelines/scripts/ut/env_setup.sh "${test_case}"
export LD_LIBRARY_PATH=/usr/local/lib/:$LD_LIBRARY_PATH
export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/coverage.file
lpot_path=$(python -c 'import neural_compressor; import os; print(os.path.dirname(neural_compressor.__file__))')
cd /neural-compressor/test || exit 1
find ./adaptor -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh
LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
ut_log_name=${LOG_DIR}/ut_adaptor.log
echo "cat run.sh..."
sort run.sh -o run.sh
cat run.sh | tee ${ut_log_name}
echo "------UT start-------"
bash -x run.sh 2>&1 | tee -a ${ut_log_name}
echo "------UT end -------"
if [ $(grep -c "FAILED" ${ut_log_name}) != 0 ] || [ $(grep -c "core dumped" ${ut_log_name}) != 0 ] || [ $(grep -c "ModuleNotFoundError:" ${ut_log_name}) != 0 ] || [ $(grep -c "OK" ${ut_log_name}) == 0 ];then
echo "Found errors in UT test, please check the output..."
exit 1
fi
cp .coverage ${LOG_DIR}/.coverage.adaptor
echo "UT finished successfully! "

View File

@@ -0,0 +1,33 @@
#!/bin/bash
python -c "import neural_compressor as nc;print(nc.version.__version__)"
test_case="run basic tfnewapi"
echo "${test_case}"
echo "specify fwk version..."
export tensorflow_version='spr-base'
export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/coverage.file
# export FORCE_BF16=1
echo "set up UT env..."
bash /neural-compressor/.azure-pipelines/scripts/ut/env_setup.sh "${test_case}"
lpot_path=$(python -c 'import neural_compressor; import os; print(os.path.dirname(neural_compressor.__file__))')
cd /neural-compressor/test || exit 1
find ./tfnewapi -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh
LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
ut_log_name=${LOG_DIR}/ut_tf_newapi.log
echo "cat run.sh..."
sort run.sh -o run.sh
cat run.sh | tee ${ut_log_name}
echo "------UT start-------"
bash -x run.sh 2>&1 | tee -a ${ut_log_name}
echo "------UT end -------"
if [ $(grep -c "FAILED" ${ut_log_name}) != 0 ] || [ $(grep -c "core dumped" ${ut_log_name}) != 0 ] || [ $(grep -c "ModuleNotFoundError:" ${ut_log_name}) != 0 ] || [ $(grep -c "OK" ${ut_log_name}) == 0 ];then
echo "Found errors in UT test, please check the output..."
exit 1
fi
cp .coverage ${LOG_DIR}/.coverage.tfnewapi
echo "UT finished successfully! "

View File

@@ -0,0 +1,38 @@
#!/bin/bash
python -c "import neural_compressor as nc;print(nc.version.__version__)"
test_case="run basic api quantization/benchmark/export/mixed_precision/distillation/scheduler/nas"
echo "${test_case}"
echo "specify fwk version..."
source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh $1
echo "set up UT env..."
bash /neural-compressor/.azure-pipelines/scripts/ut/env_setup.sh "${test_case}"
export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/coverage.file
lpot_path=$(python -c 'import neural_compressor; import os; print(os.path.dirname(neural_compressor.__file__))')
cd /neural-compressor/test || exit 1
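# Collect the API-level suites (quantization, benchmark, export, mixed_precision, distillation, scheduler, nas) into one coverage run script.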
find ./quantization* -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh
find ./benchmark* -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh
find ./export* -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh
find ./mixed_precision* -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh
find ./distillation -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh
find ./scheduler -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh
find ./nas -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh
LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
ut_log_name=${LOG_DIR}/ut_api.log
echo "cat run.sh..."
sort run.sh -o run.sh
cat run.sh | tee ${ut_log_name}
echo "------UT start-------"
bash -x run.sh 2>&1 | tee -a ${ut_log_name}
echo "------UT end -------"
if [ $(grep -c "FAILED" ${ut_log_name}) != 0 ] || [ $(grep -c "core dumped" ${ut_log_name}) != 0 ] || [ $(grep -c "ModuleNotFoundError:" ${ut_log_name}) != 0 ] || [ $(grep -c "OK" ${ut_log_name}) == 0 ];then
echo "Found errors in UT test, please check the output..."
exit 1
fi
cp .coverage ${LOG_DIR}/.coverage.api
echo "UT finished successfully! "

View File

@@ -0,0 +1,35 @@
#!/bin/bash
python -c "import neural_compressor as nc;print(nc.version.__version__)"
test_case="run basic itex"
echo "${test_case}"
echo "specify fwk version..."
export itex_version='2.15.0.0'
export tensorflow_version='2.15.0-official'
export onnx_version='1.16.0'
export onnxruntime_version='1.18.0'
echo "set up UT env..."
bash /neural-compressor/.azure-pipelines/scripts/ut/env_setup.sh "${test_case}"
export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/coverage.file
lpot_path=$(python -c 'import neural_compressor; import os; print(os.path.dirname(neural_compressor.__file__))')
cd /neural-compressor/test || exit 1
find ./itex -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh
LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
ut_log_name=${LOG_DIR}/ut_itex.log
echo "cat run.sh..."
sort run.sh -o run.sh
cat run.sh | tee ${ut_log_name}
echo "------UT start-------"
bash -x run.sh 2>&1 | tee -a ${ut_log_name}
echo "------UT end -------"
if [ $(grep -c "FAILED" ${ut_log_name}) != 0 ] || [ $(grep -c "core dumped" ${ut_log_name}) != 0 ] || [ $(grep -c "ModuleNotFoundError:" ${ut_log_name}) != 0 ] || [ $(grep -c "OK" ${ut_log_name}) == 0 ];then
echo "Found errors in UT test, please check the output..."
exit 1
fi
cp .coverage ${LOG_DIR}/.coverage.itex
echo "UT finished successfully! "

View File

@@ -0,0 +1,50 @@
#!/bin/bash
python -c "import neural_compressor as nc;print(nc.version.__version__)"
test_case="run basic others"
echo "${test_case}"
echo "specify fwk version..."
source /neural-compressor/.azure-pipelines/scripts/fwk_version.sh $1
echo "set up UT env..."
bash /neural-compressor/.azure-pipelines/scripts/ut/env_setup.sh "${test_case}"
export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/coverage.file
lpot_path=$(python -c 'import neural_compressor; import os; print(os.path.dirname(neural_compressor.__file__))')
cd /neural-compressor/test || exit 1
find . -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh
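# Drop suites that already run in dedicated jobs so only the remaining cases execute here.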
sed -i '/ adaptor\//d' run.sh
sed -i '/ tfnewapi\//d' run.sh
sed -i '/ itex\//d' run.sh
sed -i '/ pruning_with_pt/d' run.sh
sed -i '/ pruning_with_tf/d' run.sh
sed -i '/ quantization/d' run.sh
sed -i '/ benchmark/d' run.sh
sed -i '/ export/d' run.sh
sed -i '/ mixed_precision/d' run.sh
sed -i '/ distillation\//d' run.sh
sed -i '/ scheduler\//d' run.sh
sed -i '/ nas\//d' run.sh
sed -i '/ 3x\//d' run.sh
sed -i '/ distributed\//d' run.sh
echo "copy model for dynas..."
mkdir -p .torch/ofa_nets || true
cp -r /tf_dataset/ut-localfile/ofa_mbv3_d234_e346_k357_w1.2 .torch/ofa_nets || true
LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
ut_log_name=${LOG_DIR}/ut_others.log
echo "cat run.sh..."
sort run.sh -o run.sh
cat run.sh | tee ${ut_log_name}
echo "------UT start-------"
bash -x run.sh 2>&1 | tee -a ${ut_log_name}
echo "------UT end -------"
if [ $(grep -c "FAILED" ${ut_log_name}) != 0 ] || [ $(grep -c "core dumped" ${ut_log_name}) != 0 ] || [ $(grep -c "ModuleNotFoundError:" ${ut_log_name}) != 0 ] || [ $(grep -c "OK" ${ut_log_name}) == 0 ];then
echo "Found errors in UT test, please check the output..."
exit 1
fi
cp .coverage ${LOG_DIR}/.coverage.others
echo "UT finished successfully! "

View File

@@ -0,0 +1,35 @@
#!/bin/bash
python -c "import neural_compressor as nc;print(nc.version.__version__)"
test_case="run basic pt pruning"
echo "${test_case}"
echo "specify fwk version..."
export pytorch_version='2.4.0+cpu'
export torchvision_version='0.18.0+cpu'
export ipex_version='2.4.0+cpu'
echo "set up UT env..."
bash /neural-compressor/.azure-pipelines/scripts/ut/env_setup.sh "${test_case}"
export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/coverage.file
lpot_path=$(python -c 'import neural_compressor; import os; print(os.path.dirname(neural_compressor.__file__))')
cd /neural-compressor/test || exit 1
find ./pruning_with_pt -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh
# find ./distributed -name "test_distributed_pt_train.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh
LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
ut_log_name=${LOG_DIR}/ut_pt_pruning.log
echo "cat run.sh..."
sort run.sh -o run.sh
cat run.sh | tee ${ut_log_name}
echo "------UT start-------"
bash -x run.sh 2>&1 | tee -a ${ut_log_name}
echo "------UT end -------"
if [ $(grep -c "FAILED" ${ut_log_name}) != 0 ] || [ $(grep -c "core dumped" ${ut_log_name}) != 0 ] || [ $(grep -c "ModuleNotFoundError:" ${ut_log_name}) != 0 ] || [ $(grep -c "OK" ${ut_log_name}) == 0 ];then
echo "Found errors in UT test, please check the output..."
exit 1
fi
cp .coverage ${LOG_DIR}/.coverage.pt_pruning
echo "UT finished successfully! "

View File

@@ -0,0 +1,33 @@
#!/bin/bash
python -c "import neural_compressor as nc;print(nc.version.__version__)"
test_case="run basic tf pruning"
echo "${test_case}"
echo "specify fwk version..."
export tensorflow_version='2.14.0'
echo "set up UT env..."
bash /neural-compressor/.azure-pipelines/scripts/ut/env_setup.sh "${test_case}"
export COVERAGE_RCFILE=/neural-compressor/.azure-pipelines/scripts/ut/coverage.file
lpot_path=$(python -c 'import neural_compressor; import os; print(os.path.dirname(neural_compressor.__file__))')
cd /neural-compressor/test || exit 1
find ./pruning_with_tf -name "test*.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'> run.sh
find ./distributed -name "test_distributed_tf_dataloader.py" | sed 's,\.\/,coverage run --source='"${lpot_path}"' --append ,g' | sed 's/$/ --verbose/'>> run.sh
LOG_DIR=/neural-compressor/log_dir
mkdir -p ${LOG_DIR}
ut_log_name=${LOG_DIR}/ut_tf_pruning.log
echo "cat run.sh..."
sort run.sh -o run.sh
cat run.sh | tee ${ut_log_name}
echo "------UT start-------"
bash -x run.sh 2>&1 | tee -a ${ut_log_name}
echo "------UT end -------"
if [ $(grep -c "FAILED" ${ut_log_name}) != 0 ] || [ $(grep -c "core dumped" ${ut_log_name}) != 0 ] || [ $(grep -c "ModuleNotFoundError:" ${ut_log_name}) != 0 ] || [ $(grep -c "OK" ${ut_log_name}) == 0 ];then
echo "Found errors in UT test, please check the output..."
exit 1
fi
cp .coverage ${LOG_DIR}/.coverage.tf_pruning
echo "UT finished successfully! "

View File

@@ -0,0 +1,42 @@
parameters:
- name: codeScanFileName
type: string
- name: uploadPath
type: string
- name: codeScanContainerName
type: string
default: "codeScan"
- name: scanModule
type: string
default: "neural_compressor"
steps:
- template: docker-template.yml
parameters:
dockerConfigName: "commonDockerConfig"
repoName: "code-scan"
repoTag: "1.0"
dockerFileName: "DockerfileCodeScan"
containerName: ${{ parameters.codeScanContainerName }}
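# Run the chosen scan script inside the code-scan container; the resulting log is published whether the scan passes or fails.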
- script: |
docker exec ${{ parameters.codeScanContainerName }} bash -c "bash /neural-compressor/.azure-pipelines/scripts/codeScan/${{ parameters.codeScanFileName }}/${{ parameters.codeScanFileName }}.sh \
--scan_module=${{ parameters.scanModule }}"
displayName: "${{ parameters.codeScanFileName }} Check"
- task: PublishPipelineArtifact@1
condition: succeededOrFailed()
inputs:
targetPath: .azure-pipelines/scripts/codeScan/scanLog/${{ parameters.uploadPath }}
artifact: $(System.JobAttempt)_${{ parameters.codeScanFileName }}
publishLocation: "pipeline"
displayName: "PublishPipelineArtifact"
- task: Bash@3
condition: always()
inputs:
targetType: "inline"
script: |
docker exec ${{ parameters.codeScanContainerName }} bash -c "rm -fr /neural-compressor/* && rm -fr /neural-compressor/.* || true"
displayName: "Docker clean up"

View File

@@ -0,0 +1,103 @@
parameters:
- name: dockerConfigName
type: string
default: "commonDockerConfig"
- name: repoName
type: string
default: "neural-compressor"
- name: repoTag
type: string
default: "py310"
- name: dockerFileName
type: string
default: "Dockerfile"
- name: containerName
type: string
- name: repo
type: string
default: "https://github.com/intel/neural-compressor"
- name: imageSource
type: string
default: "build"
steps:
- task: Bash@3
inputs:
targetType: "inline"
script: |
docker ps -a
if [[ $(docker ps -a | grep -i '${{ parameters.containerName }}'$) ]]; then
docker start $(docker ps -aq --filter "name=${{ parameters.containerName }}")
echo "remove left files through container ..."
docker exec ${{ parameters.containerName }} bash -c "ls -a /neural-compressor && rm -fr /neural-compressor/* && rm -fr /neural-compressor/.* && ls -a /neural-compressor || true"
fi
displayName: "Docker workspace clean up"
- ${{ if eq(parameters.dockerConfigName, 'commonDockerConfig') }}:
- script: |
rm -fr ${BUILD_SOURCESDIRECTORY} || sudo rm -fr ${BUILD_SOURCESDIRECTORY} || true
displayName: "Clean workspace"
- checkout: self
clean: true
displayName: "Check out repo"
fetchDepth: 0
- ${{ if eq(parameters.dockerConfigName, 'gitCloneDockerConfig') }}:
- script: |
rm -fr ${BUILD_SOURCESDIRECTORY} || sudo rm -fr ${BUILD_SOURCESDIRECTORY} || true
mkdir ${BUILD_SOURCESDIRECTORY}
chmod 777 ${BUILD_SOURCESDIRECTORY}
displayName: "Clean workspace"
- checkout: none
- script: |
git clone ${{ parameters.repo }} ${BUILD_SOURCESDIRECTORY}
git config --global --add safe.directory ${BUILD_SOURCESDIRECTORY}
cd ${BUILD_SOURCESDIRECTORY}
git checkout master
displayName: "Check out master"
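# Build the devel image locally, or pull the prebuilt Habana Gaudi image when imageSource is "pull".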
- ${{ if eq(parameters.imageSource, 'build') }}:
- script: |
docker image prune -a -f
if [[ ! $(docker images | grep -i ${{ parameters.repoName }}:${{ parameters.repoTag }}) ]]; then
docker build -f ${BUILD_SOURCESDIRECTORY}/.azure-pipelines/docker/${{parameters.dockerFileName}}.devel -t ${{ parameters.repoName }}:${{ parameters.repoTag }} .
fi
docker images | grep -i ${{ parameters.repoName }}
if [[ $? -ne 0 ]]; then
echo "No such repo found"
exit 1
fi
displayName: "Build develop docker image"
- ${{ if eq(parameters.imageSource, 'pull') }}:
- script: |
docker pull vault.habana.ai/gaudi-docker/1.19.0/ubuntu22.04/habanalabs/pytorch-installer-2.5.1:latest
displayName: "Pull habana docker image"
- script: |
docker stop $(docker ps -aq --filter "name=${{ parameters.containerName }}")
docker rm -vf ${{ parameters.containerName }} || true
env | sort
displayName: "Clean docker container"
- ${{ if ne(parameters.containerName, '') }}:
- task: Bash@3
inputs:
targetType: "inline"
script: |
if [[ "${{ parameters.imageSource }}" == "build" ]]; then
docker run -dit --disable-content-trust --privileged --name=${{ parameters.containerName }} --shm-size="2g" \
-v ${BUILD_SOURCESDIRECTORY}:/neural-compressor -v /tf_dataset:/tf_dataset -v /tf_dataset2:/tf_dataset2 \
${{ parameters.repoName }}:${{ parameters.repoTag }}
else
docker run -dit --disable-content-trust --privileged --name=${{ parameters.containerName }} --shm-size="2g" \
--runtime=habana -e HABANA_VISIBLE_DEVICES=all -e OMPI_MCA_btl_vader_single_copy_mechanism=none --cap-add=sys_nice --net=host --ipc=host \
-v ${BUILD_SOURCESDIRECTORY}:/neural-compressor vault.habana.ai/gaudi-docker/1.19.0/ubuntu22.04/habanalabs/pytorch-installer-2.5.1:latest
docker exec ${{ parameters.containerName }} bash -c "ln -sf \$(which python3) /usr/bin/python"
fi
echo "Show the container list after docker run ... "
docker ps -a
displayName: "Docker run - ${{ parameters.containerName }} Container"

View File

@@ -0,0 +1,80 @@
parameters:
- name: modelName
type: string
default: "resnet50v1.5"
- name: framework
type: string
default: "tensorflow"
- name: APIVersion
type: string
default: ""
- name: modelContainerName
type: string
default: "model"
steps:
- template: docker-template.yml
parameters:
dockerConfigName: "commonDockerConfig"
repoName: "neural-compressor"
repoTag: "py310"
dockerFileName: "Dockerfile"
containerName: ${{ parameters.modelContainerName }}
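# Set up the model env, fetch reference logs, quantize, then benchmark INT8/FP32 (skipped for the 3x API) and collect logs.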
- script: |
docker exec ${{ parameters.modelContainerName }} bash -c "cd /neural-compressor/.azure-pipelines/scripts/models \
&& bash run_${{ parameters.framework }}_models_trigger.sh --model=${{ parameters.modelName }} --mode='env_setup'"
displayName: Env setup
- task: DownloadPipelineArtifact@2
continueOnError: true
inputs:
source: "specific"
artifact: ${{ parameters.framework }}_${{ parameters.modelName }}
patterns: "**_summary.log"
path: $(Build.SourcesDirectory)/.azure-pipelines/scripts/models/${{ parameters.modelName }}_refer_log
project: $(System.TeamProject)
pipeline: "Model-Test"
runVersion: "specific"
runId: $(refer_buildId)
retryDownloadCount: 3
displayName: "Download refer logs"
- script: |
docker exec ${{ parameters.modelContainerName }} bash -c "cd /neural-compressor/.azure-pipelines/scripts/models \
&& bash run_${{ parameters.framework }}_models_trigger.sh --model=${{ parameters.modelName }} --mode='tuning'"
displayName: Quantization
- ${{ if ne(parameters.APIVersion, '3x') }}:
- script: |
docker exec ${{ parameters.modelContainerName }} bash -c "cd /neural-compressor/.azure-pipelines/scripts/models \
&& bash run_${{ parameters.framework }}_models_trigger.sh --model=${{ parameters.modelName }} --mode='int8_benchmark' --USE_TUNE_ACC=$(USE_TUNE_ACC) --PERF_STABLE_CHECK=$(PERF_STABLE_CHECK)"
displayName: INT8 Benchmark
- script: |
docker exec ${{ parameters.modelContainerName }} bash -c "cd /neural-compressor/.azure-pipelines/scripts/models \
&& bash run_${{ parameters.framework }}_models_trigger.sh --model=${{ parameters.modelName }} --mode='fp32_benchmark' --USE_TUNE_ACC=$(USE_TUNE_ACC) --PERF_STABLE_CHECK=$(PERF_STABLE_CHECK)"
displayName: FP32 Benchmark
- task: Bash@3
inputs:
targetType: "inline"
script: |
docker exec ${{ parameters.modelContainerName }} bash -c "cd /neural-compressor/.azure-pipelines/scripts/models \
&& bash run_${{ parameters.framework }}_models_trigger.sh --model=${{ parameters.modelName }} --mode='collect_log' --BUILD_BUILDID=$(Build.BuildId)"
displayName: Collect log
- task: PublishPipelineArtifact@1
inputs:
targetPath: $(Build.SourcesDirectory)/.azure-pipelines/scripts/models/${{ parameters.modelName }}/
artifact: ${{ parameters.framework }}_${{ parameters.modelName }}
publishLocation: "pipeline"
- task: Bash@3
condition: always()
inputs:
targetType: "inline"
script: |
docker exec ${{ parameters.modelContainerName }} bash -c "rm -fr /neural-compressor/* && rm -fr /neural-compressor/.* || true"
displayName: "Docker clean up"

View File

@@ -0,0 +1,61 @@
parameters:
- name: dockerConfigName
type: string
default: "commonDockerConfig"
- name: repo
type: string
default: "https://github.com/intel/neural-compressor"
- name: utScriptFileName
type: string
- name: uploadPath
type: string
- name: utArtifact
type: string
- name: utTestMode
type: string
default: "coverage"
- name: utContainerName
type: string
default: "utTest"
- name: imageSource
type: string
default: "build"
steps:
- template: docker-template.yml
parameters:
dockerConfigName: ${{ parameters.dockerConfigName }}
repoName: "neural-compressor"
repoTag: "py310"
dockerFileName: "Dockerfile"
containerName: ${{ parameters.utContainerName }}
repo: ${{ parameters.repo }}
imageSource: ${{ parameters.imageSource }}
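# Install neural-compressor inside the container and run the selected UT script with the chosen test mode.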
- script: |
docker exec ${{ parameters.utContainerName }} bash -c "cd /neural-compressor/.azure-pipelines/scripts \
&& bash install_nc.sh ${{ parameters.utScriptFileName }} \
&& bash ut/${{ parameters.utScriptFileName }}.sh ${{ parameters.utTestMode }}"
displayName: "Run UT"
- task: PublishPipelineArtifact@1
condition: succeededOrFailed()
inputs:
targetPath: ${{ parameters.uploadPath }}
artifact: $(System.JobAttempt)_${{ parameters.utArtifact }}_report
publishLocation: "pipeline"
- ${{ if eq(parameters.utTestMode, 'coverage') }}:
- task: PublishPipelineArtifact@1
inputs:
targetPath: ${{ parameters.uploadPath }}
artifact: ${{ parameters.utArtifact }}_coverage
publishLocation: "pipeline"
- task: Bash@3
condition: always()
inputs:
targetType: "inline"
script: |
docker exec ${{ parameters.utContainerName }} bash -c "rm -fr /neural-compressor/* && rm -fr /neural-compressor/.* || true"
displayName: "Docker clean up"

View File

@@ -0,0 +1,118 @@
trigger: none
pr:
autoCancel: true
drafts: false
branches:
include:
- master
paths:
include:
- .azure-pipelines/scripts/ut/3x/run_3x_pt_fp8.sh
- .azure-pipelines/scripts/install_nc.sh
- .azure-pipelines/ut-3x-pt-fp8.yml
- .azure-pipelines/template/docker-template.yml
- neural_compressor/common
- neural_compressor/torch
- test/3x/torch/algorithms/fp8_quant
- test/3x/torch/quantization/fp8_quant
- test/3x/torch/quantization/weight_only/test_rtn.py
- test/3x/torch/quantization/weight_only/test_load.py
- setup.py
- requirements_pt.txt
pool: GAUDI
variables:
IMAGE_NAME: "neural-compressor"
IMAGE_TAG: "py310"
UPLOAD_PATH: $(Build.SourcesDirectory)/log_dir
DOWNLOAD_PATH: $(Build.SourcesDirectory)/log_dir
ARTIFACT_NAME: "UT_coverage_report_3x_pt_fp8"
REPO: $(Build.Repository.Uri)
stages:
- stage: Torch_habana
displayName: Torch 3x Habana FP8
dependsOn: []
jobs:
- job:
displayName: Torch 3x Habana FP8
steps:
- template: template/ut-template.yml
parameters:
imageSource: "pull"
dockerConfigName: "commonDockerConfig"
utScriptFileName: "3x/run_3x_pt_fp8"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_3x"
- stage: Torch_habana_baseline
displayName: Torch 3x Habana FP8 baseline
dependsOn: []
jobs:
- job:
displayName: Torch 3x Habana FP8 baseline
steps:
- template: template/ut-template.yml
parameters:
imageSource: "pull"
dockerConfigName: "gitCloneDockerConfig"
utScriptFileName: "3x/run_3x_pt_fp8"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_3x_baseline"
- stage: Coverage
displayName: "Coverage Compare"
pool:
vmImage: "ubuntu-latest"
dependsOn: [Torch_habana, Torch_habana_baseline]
jobs:
- job: CollectDatafiles
steps:
- script: |
if [[ ! $(docker images | grep -i ${IMAGE_NAME}:${IMAGE_TAG}) ]]; then
docker build -f ${BUILD_SOURCESDIRECTORY}/.azure-pipelines/docker/Dockerfile.devel -t ${IMAGE_NAME}:${IMAGE_TAG} .
fi
docker images | grep -i ${IMAGE_NAME}
if [[ $? -ne 0 ]]; then
echo "No such repo found"
exit 1
fi
displayName: "Build develop docker image"
- task: DownloadPipelineArtifact@2
inputs:
artifact:
patterns: '*_coverage/.coverage'
path: $(DOWNLOAD_PATH)
- script: |
echo "--- create container ---"
docker run -d -it --name="collectLogs" -v ${BUILD_SOURCESDIRECTORY}:/neural-compressor ${IMAGE_NAME}:${IMAGE_TAG} /bin/bash
echo "--- docker ps ---"
docker ps
echo "--- collect logs ---"
docker exec collectLogs /bin/bash +x -c "cd /neural-compressor/.azure-pipelines/scripts \
&& bash install_nc.sh 3x_pt_fp8 \
&& bash ut/3x/collect_log_3x.sh 3x_pt_fp8"
displayName: "Collect UT Coverage"
- task: PublishCodeCoverageResults@2
inputs:
summaryFileLocation: $(Build.SourcesDirectory)/log_dir/coverage_PR/coverage.xml
- task: PublishPipelineArtifact@1
condition: succeededOrFailed()
inputs:
targetPath: $(UPLOAD_PATH)
artifact: $(ARTIFACT_NAME)
publishLocation: "pipeline"
- task: Bash@3
condition: always()
inputs:
targetType: "inline"
script: |
docker exec collectLogs bash -c "rm -fr /neural-compressor/* && rm -fr /neural-compressor/.* || true"
displayName: "Docker clean up"

View File

@@ -0,0 +1,116 @@
trigger: none
pr:
autoCancel: true
drafts: false
branches:
include:
- master
paths:
include:
- neural_compressor/common
- neural_compressor/torch
- test/3x/torch
- test/3x/common
- setup.py
- requirements_pt.txt
- .azure-pipelines/ut-3x-pt.yml
- .azure-pipelines/template/docker-template.yml
- .azure-pipelines/scripts/install_nc.sh
- .azure-pipelines/scripts/ut/3x/run_3x_pt.sh
pool: ICX-16C
variables:
IMAGE_NAME: "neural-compressor"
IMAGE_TAG: "py310"
UPLOAD_PATH: $(Build.SourcesDirectory)/log_dir
DOWNLOAD_PATH: $(Build.SourcesDirectory)/log_dir
ARTIFACT_NAME: "UT_coverage_report_3x_pt"
REPO: $(Build.Repository.Uri)
stages:
- stage: Torch
displayName: Unit Test 3x Torch
dependsOn: []
jobs:
- job:
displayName: Unit Test 3x Torch
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "commonDockerConfig"
utScriptFileName: "3x/run_3x_pt"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_3x"
- stage: Torch_baseline
displayName: Unit Test 3x Torch baseline
dependsOn: []
jobs:
- job:
displayName: Unit Test 3x Torch baseline
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "gitCloneDockerConfig"
utScriptFileName: "3x/run_3x_pt"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_3x_baseline"
repo: $(REPO)
- stage: Coverage
displayName: "Coverage Compare"
pool:
vmImage: "ubuntu-latest"
dependsOn: [Torch, Torch_baseline]
jobs:
- job: CollectDatafiles
steps:
- script: |
if [[ ! $(docker images | grep -i ${IMAGE_NAME}:${IMAGE_TAG}) ]]; then
docker build -f ${BUILD_SOURCESDIRECTORY}/.azure-pipelines/docker/Dockerfile.devel -t ${IMAGE_NAME}:${IMAGE_TAG} .
fi
docker images | grep -i ${IMAGE_NAME}
if [[ $? -ne 0 ]]; then
echo "No such repo found"
exit 1
fi
displayName: "Build develop docker image"
- task: DownloadPipelineArtifact@2
inputs:
artifact:
patterns: '*_coverage/.coverage'
path: $(DOWNLOAD_PATH)
- script: |
echo "--- create container ---"
docker run -d -it --name="collectLogs" -v ${BUILD_SOURCESDIRECTORY}:/neural-compressor ${IMAGE_NAME}:${IMAGE_TAG} /bin/bash
echo "--- docker ps ---"
docker ps
echo "--- collect logs ---"
docker exec collectLogs /bin/bash +x -c "cd /neural-compressor/.azure-pipelines/scripts \
&& bash install_nc.sh 3x_pt \
&& bash ut/3x/collect_log_3x.sh 3x_pt"
displayName: "Collect UT Coverage"
- task: PublishCodeCoverageResults@2
inputs:
summaryFileLocation: $(Build.SourcesDirectory)/log_dir/coverage_PR/coverage.xml
- task: PublishPipelineArtifact@1
condition: succeededOrFailed()
inputs:
targetPath: $(UPLOAD_PATH)
artifact: $(ARTIFACT_NAME)
publishLocation: "pipeline"
- task: Bash@3
condition: always()
inputs:
targetType: "inline"
script: |
docker exec collectLogs bash -c "rm -fr /neural-compressor/* && rm -fr /neural-compressor/.* || true"
displayName: "Docker clean up"

View File

@@ -0,0 +1,113 @@
trigger: none
pr:
autoCancel: true
drafts: false
branches:
include:
- master
paths:
include:
- neural_compressor/common
- neural_compressor/tensorflow
- test/3x/tensorflow
- test/3x/common
- setup.py
- requirements_tf.txt
- .azure-pipelines/scripts/ut/3x/run_3x_tf.sh
- .azure-pipelines/template/docker-template.yml
pool: ICX-16C
variables:
IMAGE_NAME: "neural-compressor"
IMAGE_TAG: "py310"
UPLOAD_PATH: $(Build.SourcesDirectory)/log_dir
DOWNLOAD_PATH: $(Build.SourcesDirectory)/log_dir
ARTIFACT_NAME: "UT_coverage_report_3x_tf"
REPO: $(Build.Repository.Uri)
stages:
- stage: TensorFlow
displayName: Unit Test 3x TensorFlow
dependsOn: []
jobs:
- job:
displayName: Unit Test 3x TensorFlow
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "commonDockerConfig"
utScriptFileName: "3x/run_3x_tf"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_3x"
- stage: TensorFlow_baseline
displayName: Unit Test 3x TensorFlow baseline
dependsOn: []
jobs:
- job:
displayName: Unit Test 3x TensorFlow baseline
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "gitCloneDockerConfig"
utScriptFileName: "3x/run_3x_tf"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_3x_baseline"
repo: $(REPO)
- stage: Coverage
displayName: "Coverage Compare"
pool:
vmImage: "ubuntu-latest"
dependsOn: [TensorFlow, TensorFlow_baseline]
jobs:
- job: CollectDatafiles
steps:
- script: |
if [[ ! $(docker images | grep -i ${IMAGE_NAME}:${IMAGE_TAG}) ]]; then
docker build -f ${BUILD_SOURCESDIRECTORY}/.azure-pipelines/docker/Dockerfile.devel -t ${IMAGE_NAME}:${IMAGE_TAG} .
fi
docker images | grep -i ${IMAGE_NAME}
if [[ $? -ne 0 ]]; then
echo "No such repo found"
exit 1
fi
displayName: "Build develop docker image"
- task: DownloadPipelineArtifact@2
inputs:
artifact:
patterns: '*_coverage/.coverage'
path: $(DOWNLOAD_PATH)
- script: |
echo "--- create container ---"
docker run -d -it --name="collectLogs" -v ${BUILD_SOURCESDIRECTORY}:/neural-compressor ${IMAGE_NAME}:${IMAGE_TAG} /bin/bash
echo "--- docker ps ---"
docker ps
echo "--- collect logs ---"
docker exec collectLogs /bin/bash +x -c "cd /neural-compressor/.azure-pipelines/scripts \
&& bash install_nc.sh 3x_tf \
&& bash ut/3x/collect_log_3x.sh 3x_tf"
displayName: "Collect UT Coverage"
- task: PublishCodeCoverageResults@2
inputs:
summaryFileLocation: $(Build.SourcesDirectory)/log_dir/coverage_PR/coverage.xml
- task: PublishPipelineArtifact@1
condition: succeededOrFailed()
inputs:
targetPath: $(UPLOAD_PATH)
artifact: $(ARTIFACT_NAME)
publishLocation: "pipeline"
- task: Bash@3
condition: always()
inputs:
targetType: "inline"
script: |
docker exec collectLogs bash -c "rm -fr /neural-compressor/* && rm -fr /neural-compressor/.* || true"
displayName: "Docker clean up"

View File

@@ -0,0 +1,287 @@
trigger: none
pr:
autoCancel: true
drafts: false
branches:
include:
- master
paths:
include:
- neural_compressor
- test
- setup.py
- requirements.txt
- .azure-pipelines/ut-basic.yml
- .azure-pipelines/template/docker-template.yml
- .azure-pipelines/scripts/ut
- .azure-pipelines/scripts/fwk_version.sh
- .azure-pipelines/scripts/install_nc.sh
exclude:
- test/3x
- neural_compressor/common
- neural_compressor/torch
- neural_compressor/tensorflow
- neural_compressor/onnxrt
- neural_compressor/transformers
- neural_compressor/evaluation
- .azure-pipelines/scripts/ut/3x
pool: ICX-16C
variables:
IMAGE_NAME: "neural-compressor"
IMAGE_TAG: "py310"
UPLOAD_PATH: $(Build.SourcesDirectory)/log_dir
DOWNLOAD_PATH: $(Build.SourcesDirectory)/log_dir
ARTIFACT_NAME: "UT_coverage_report"
REPO: $(Build.Repository.Uri)
stages:
- stage: Adaptor
displayName: Unit Test FWKs adaptor
dependsOn: []
jobs:
- job:
displayName: Test FWKs adaptor
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "commonDockerConfig"
utScriptFileName: "run_basic_adaptor"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_adaptor"
- stage: API
displayName: Unit Test User facing API
dependsOn: []
jobs:
- job:
displayName: Test User facing API
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "commonDockerConfig"
utScriptFileName: "run_basic_api"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_api"
- stage: Pruning
displayName: Unit Test Pruning
dependsOn: []
jobs:
- job:
displayName: Test PyTorch Pruning
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "commonDockerConfig"
utScriptFileName: "run_basic_pt_pruning"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_pt-pruning"
- job:
displayName: Test TensorFlow Pruning
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "commonDockerConfig"
utScriptFileName: "run_basic_tf_pruning"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_tf-pruning"
- stage: TFNewAPI
displayName: Unit Test TF newAPI
dependsOn: []
jobs:
- job:
displayName: Test TF newAPI
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "commonDockerConfig"
utScriptFileName: "run_basic_adaptor_tfnewapi"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_tfnewapi"
- stage: ITEX
displayName: Unit Test ITEX
dependsOn: []
jobs:
- job:
displayName: Test ITEX
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "commonDockerConfig"
utScriptFileName: "run_basic_itex"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_itex"
- stage: Others
displayName: Unit Test other basic case
dependsOn: []
jobs:
- job:
displayName: Test other basic case
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "commonDockerConfig"
utScriptFileName: "run_basic_others"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut_others"
- stage: Adaptor_base
displayName: Unit Test FWKs adaptor baseline
dependsOn: []
jobs:
- job:
displayName: Test FWKs adaptor baseline
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "gitCloneDockerConfig"
utScriptFileName: "run_basic_adaptor"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut-base_adaptor"
repo: $(REPO)
- stage: API_base
displayName: Unit Test User facing API baseline
dependsOn: []
jobs:
- job:
displayName: Test User facing API baseline
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "gitCloneDockerConfig"
utScriptFileName: "run_basic_api"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut-base_api"
repo: $(REPO)
- stage: Pruning_base
displayName: Unit Test Pruning baseline
dependsOn: []
jobs:
- job:
displayName: Test PyTorch Pruning baseline
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "gitCloneDockerConfig"
utScriptFileName: "run_basic_pt_pruning"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut-base_pt-pruning"
repo: $(REPO)
- job:
displayName: Test TensorFlow Pruning baseline
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "gitCloneDockerConfig"
utScriptFileName: "run_basic_tf_pruning"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut-base_tf-pruning"
repo: $(REPO)
- stage: TFNewAPI_base
displayName: Unit Test TF newAPI baseline
dependsOn: []
jobs:
- job:
displayName: Test TF newAPI baseline
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "gitCloneDockerConfig"
utScriptFileName: "run_basic_adaptor_tfnewapi"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut-base_tfnewapi"
repo: $(REPO)
- stage: ITEX_base
displayName: Unit Test ITEX baseline
dependsOn: []
jobs:
- job:
displayName: Test ITEX baseline
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "gitCloneDockerConfig"
utScriptFileName: "run_basic_itex"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut-base_itex"
repo: $(REPO)
- stage: Others_base
displayName: Unit Test other cases baseline
dependsOn: []
jobs:
- job:
displayName: Test other cases baseline
steps:
- template: template/ut-template.yml
parameters:
dockerConfigName: "gitCloneDockerConfig"
utScriptFileName: "run_basic_others"
uploadPath: $(UPLOAD_PATH)
utArtifact: "ut-base_others"
repo: $(REPO)
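# Merge the PR and baseline coverage artifacts inside a fresh container and publish the comparison report.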
- stage: Coverage
displayName: "Coverage Compare"
pool:
vmImage: "ubuntu-latest"
dependsOn: [Adaptor, API, Pruning, TFNewAPI, ITEX, Others, Adaptor_base, API_base, Pruning_base, TFNewAPI_base, ITEX_base, Others_base]
jobs:
- job: CollectDatafiles
steps:
- script: |
if [[ ! $(docker images | grep -i ${IMAGE_NAME}:${IMAGE_TAG}) ]]; then
docker build -f ${BUILD_SOURCESDIRECTORY}/.azure-pipelines/docker/Dockerfile.devel -t ${IMAGE_NAME}:${IMAGE_TAG} .
fi
docker images | grep -i ${IMAGE_NAME}
if [[ $? -ne 0 ]]; then
echo "No such repo found"
exit 1
fi
displayName: "Build develop docker image"
- task: DownloadPipelineArtifact@2
inputs:
artifact:
patterns: '*_coverage/.coverage.*'
path: $(DOWNLOAD_PATH)
- script: |
echo "--- create container ---"
docker run -d -it --name="collectLogs" -v ${BUILD_SOURCESDIRECTORY}:/neural-compressor ${IMAGE_NAME}:${IMAGE_TAG} /bin/bash
echo "--- docker ps ---"
docker ps
echo "--- collect logs ---"
docker exec collectLogs /bin/bash +x -c "cd /neural-compressor/.azure-pipelines/scripts \
&& bash install_nc.sh \
&& bash ut/collect_log.sh"
displayName: "Collect UT Coverage"
- task: PublishCodeCoverageResults@2
inputs:
summaryFileLocation: $(Build.SourcesDirectory)/log_dir/coverage_PR/coverage.xml
- task: PublishPipelineArtifact@1
condition: succeededOrFailed()
inputs:
targetPath: $(UPLOAD_PATH)
artifact: $(ARTIFACT_NAME)
publishLocation: "pipeline"
- task: Bash@3
condition: always()
inputs:
targetType: "inline"
script: |
docker exec collectLogs bash -c "rm -fr /neural-compressor/* && rm -fr /neural-compressor/.* || true"
displayName: "Docker clean up"