update,
@@ -0,0 +1,181 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2022 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import shutil
import time
import unittest

import numpy as np
import tensorflow as tf
from tensorflow import keras

from neural_compressor.utils import logger

test_mode = "accuracy"


def build_model():
    # Load the MNIST dataset.
    mnist = keras.datasets.mnist
    (train_images, train_labels), (test_images, test_labels) = mnist.load_data()

    # Normalize the input images so that each pixel value is between 0 and 1.
    train_images = train_images / 255.0
    test_images = test_images / 255.0

    # Define the model architecture.
    model = keras.Sequential(
        [
            keras.layers.InputLayer(input_shape=(28, 28)),
            keras.layers.Reshape(target_shape=(28, 28, 1)),
            keras.layers.Conv2D(filters=12, kernel_size=(3, 3), activation="relu"),
            keras.layers.MaxPooling2D(pool_size=(2, 2)),
            keras.layers.Flatten(),
            keras.layers.Dense(10),
        ]
    )
    # Train the digit classification model.
    model.compile(
        optimizer="adam", loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=["accuracy"]
    )

    model.fit(
        train_images,
        train_labels,
        epochs=1,
        validation_split=0.1,
    )

    _, baseline_model_accuracy = model.evaluate(test_images, test_labels, verbose=0)

    print("Baseline test accuracy:", baseline_model_accuracy)
    model.save("baseline_model")


def build_dataset():
    # Load the data and split it between train and test sets.
    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

    # Scale images to the [0, 1] range.
    x_train = x_train.astype("float32") / 255
    x_test = x_test.astype("float32") / 255
    # Make sure images have shape (28, 28, 1).
    x_train = np.expand_dims(x_train, -1)
    x_test = np.expand_dims(x_test, -1)

    # Convert class vectors to binary class matrices.
    y_train = keras.utils.to_categorical(y_train, 10)
    y_test = keras.utils.to_categorical(y_test, 10)
    return x_train, y_train, x_test, y_test


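# Shared evaluation hook: INC calls this with the model under test and
# expects a scalar score back. The global `test_mode` flag switches the same
# function between plain accuracy evaluation and the latency/throughput
# report used when it is passed to the benchmark API as `b_func`.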
def eval_func(model):
    x_train, y_train, x_test, y_test = build_dataset()
    start = time.time()
    model.compile(metrics=["accuracy"], run_eagerly=False)
    score = model.evaluate(x_test, y_test)
    end = time.time()

    if test_mode == "performance":
        latency = end - start
        print("Latency: {:.3f} ms".format(latency * 1000))
        print("Throughput: {:.3f} data/sec".format(1.0 / latency))
    return score[1]


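# Minimal map-style calibration dataset: INC's TensorFlow DataLoader only
# requires `__len__` and `__getitem__` to draw (image, label) samples.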
class Dataset(object):
    def __init__(self, batch_size=100):
        mnist = keras.datasets.mnist
        (train_images, train_labels), (test_images, test_labels) = mnist.load_data()

        # Normalize the input images so that each pixel value is between 0 and 1.
        self.train_images = train_images / 255.0
        self.test_images = test_images / 255.0
        self.train_labels = train_labels
        self.test_labels = test_labels

    def __len__(self):
        return len(self.test_images)

    def __getitem__(self, idx):
        return self.test_images[idx], self.test_labels[idx]


class TestKerasInKerasOut(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        os.environ["ITEX_ONEDNN_GRAPH"] = "1"

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree("baseline_model", ignore_errors=True)
        shutil.rmtree("itex_qdq_keras_model", ignore_errors=True)

    def test_keras_in_keras_out(self):
        logger.info("Run test_keras_in_keras_out case...")
        global test_mode
        test_mode = "accuracy"
        build_model()

        from neural_compressor import set_random_seed
        from neural_compressor.config import PostTrainingQuantConfig
        from neural_compressor.data.dataloaders.dataloader import DataLoader
        from neural_compressor.quantization import fit

        set_random_seed(9527)
        config = PostTrainingQuantConfig(backend="itex")
        logger.info("=================Run Quantization...")
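        # Post-training static quantization: calibrate on the dataloader,
        # then validate each tuned candidate through `eval_func`.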
        q_model = fit(
            keras.models.load_model("./baseline_model"),
            conf=config,
            calib_dataloader=DataLoader(framework="tensorflow", dataset=Dataset()),
            eval_func=eval_func,
        )
        q_model.save("itex_qdq_keras_model")
        self.assertEqual(q_model.framework(), "keras")

        framework_config = {"framework": "keras", "approach": "post_training_static_quant"}
        q_model.q_config = framework_config
        self.assertEqual(q_model.q_config["framework"], "keras")
        self.assertEqual(q_model.graph_info, None)
        self.assertEqual(q_model.framework(), "keras")
        self.assertEqual(isinstance(q_model.model, tf.keras.Model), True)

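        # The saved Keras model should expose explicit quantize/dequantize
        # layers; scan the layer names to confirm the QDQ pattern survived.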
        model = keras.models.load_model("./itex_qdq_keras_model")
        model.summary()
        found_quantize = False
        found_dequantize = False
        for layer in model.layers:
            if "quantize" in layer.name:
                found_quantize = True
            if "dequantize" in layer.name:
                found_dequantize = True
        self.assertEqual(found_quantize, True)
        self.assertEqual(found_dequantize, True)

        from neural_compressor.benchmark import fit
        from neural_compressor.config import BenchmarkConfig

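        # Benchmark the quantized model: 100 iterations on one instance
        # pinned to a single core. Flipping `test_mode` makes `eval_func`
        # print the latency/throughput numbers.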
        conf = BenchmarkConfig(backend="itex", iteration=100, cores_per_instance=1, num_of_instance=1)
        logger.info("=================Run BenchMark...")
        test_mode = "performance"
        fit(model, conf, b_func=eval_func)


if __name__ == "__main__":
    unittest.main()
@@ -0,0 +1,189 @@
import unittest

import numpy as np
import tensorflow as tf
from tensorflow.compat.v1 import graph_util

from neural_compressor.adaptor.tf_utils.util import disable_random
from neural_compressor.config import PostTrainingQuantConfig
from neural_compressor.data.dataloaders.dataloader import DataLoader
from neural_compressor.quantization import fit
from neural_compressor.utils.utility import set_random_seed


class TestItexSmoothQuantTF(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    @disable_random()
    def test_itex_conv_sq(self):
        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
        top_relu = tf.nn.relu(x)
        paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
        x_pad = tf.pad(top_relu, paddings, "CONSTANT")
        conv_weights = tf.compat.v1.get_variable(
            "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer()
        )
        conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID")
        normed = tf.compat.v1.layers.batch_normalization(conv)

        conv_weights2 = tf.compat.v1.get_variable(
            "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer()
        )
        conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME")
        normed2 = tf.compat.v1.layers.batch_normalization(conv2)
        add = tf.raw_ops.Add(x=normed, y=normed2, name="addv2")
        relu = tf.nn.relu(add)
        relu6 = tf.nn.relu6(relu, name="op_to_store")

        out_name = relu6.name.split(":")[0]
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            output_graph_def = graph_util.convert_variables_to_constants(
                sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name]
            )

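        # Enable the SmoothQuant recipe: alpha controls how much of the
        # activation outlier range is migrated into the weights before
        # quantization.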
        set_random_seed(9527)
        config = PostTrainingQuantConfig(
            backend="itex",
            quant_level=1,
            recipes={"smooth_quant": True, "smooth_quant_args": {"alpha": 0.5}},
            calibration_sampling_size=[500],
        )

        from neural_compressor.data import Datasets

        dataset = Datasets("tensorflow")["dummy"](shape=(100, 56, 56, 16), label=True)
        dataloader = DataLoader(framework="tensorflow", dataset=dataset, batch_size=1)
        from neural_compressor import Metric

        top1 = Metric(name="topk", k=1)
        output_graph = fit(
            model=output_graph_def,
            conf=config,
            calib_dataloader=dataloader,
            eval_dataloader=dataloader,
            eval_metric=top1,
        )

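        # SmoothQuant materializes its per-channel scaling as standalone Mul
        # nodes, one per smoothed op: two convolutions here, hence two Muls.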
        mul_count = 0
        for i in output_graph.graph_def.node:
            if i.op == "Mul":
                mul_count += 1

        self.assertEqual(mul_count, 2)

    @disable_random()
    def test_itex_sq_matmul(self):
        x_data = np.random.rand(1024, 1024).astype(np.float32)
        y_data = np.random.rand(1024, 1024).astype(np.float32)
        import tensorflow.compat.v1 as tf

        x = tf.placeholder(tf.float32, shape=[1024, 1024], name="x")
        y = tf.constant(y_data, dtype=tf.float32, shape=[1024, 1024])
        z = tf.matmul(x, y)
        bias = np.random.rand(1024).astype(np.float32)
        z = tf.nn.bias_add(z, bias)
        z = tf.nn.relu(z, name="op_to_store")

        with tf.Session() as sess:
            sess.run(z, feed_dict={x: x_data, y: y_data})
            output_graph_def = sess.graph.as_graph_def()

        set_random_seed(9527)
        config = PostTrainingQuantConfig(
            backend="itex",
            quant_level=1,
            recipes={"smooth_quant": True, "smooth_quant_args": {"alpha": 0.5}},
            calibration_sampling_size=[1024],
        )

        from neural_compressor.data import Datasets

        dataset = Datasets("tensorflow")["dummy"](shape=(1024, 1024), label=True)
        dataloader = DataLoader(framework="tensorflow", dataset=dataset, batch_size=1024)
        from neural_compressor import Metric

        top1 = Metric(name="topk", k=1)
        output_graph = fit(
            model=output_graph_def,
            conf=config,
            calib_dataloader=dataloader,
            eval_dataloader=dataloader,
            eval_metric=top1,
        )

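        # Only the single MatMul is smoothed, so exactly one scaling Mul
        # should appear in the output graph.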
        mul_count = 0
        for i in output_graph.graph_def.node:
            if i.op == "Mul":
                mul_count += 1

        self.assertEqual(mul_count, 1)

    @disable_random()
    def test_itex_sq_conv_matmul(self):
        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
        top_relu = tf.nn.relu(x)
        paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
        x_pad = tf.pad(top_relu, paddings, "CONSTANT")
        conv1_weights = tf.compat.v1.get_variable(
            "weight_conv1", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer()
        )
        conv1 = tf.nn.conv2d(x_pad, conv1_weights, strides=[1, 2, 2, 1], padding="VALID")
        matmul_weights = tf.compat.v1.get_variable(
            "weight_matmul", [28 * 28 * 16, 7 * 7 * 32], initializer=tf.compat.v1.random_normal_initializer()
        )
        conv1_reshaped = tf.reshape(conv1, shape=[-1, 28 * 28 * 16])
        matmul = tf.matmul(conv1_reshaped, matmul_weights)
        reshape = tf.reshape(matmul, (1, 7, 7, 32))
        conv2_weights = tf.compat.v1.get_variable(
            "weight_conv2", [7, 7, 32, 1], initializer=tf.compat.v1.random_normal_initializer()
        )
        conv2 = tf.nn.conv2d(reshape, conv2_weights, strides=[1, 2, 2, 1], padding="VALID")
        leaky_relu = tf.nn.leaky_relu(conv2, name="op_to_store")

        out_name = leaky_relu.name.split(":")[0]
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            output_graph_def = graph_util.convert_variables_to_constants(
                sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name]
            )

        set_random_seed(9527)
        config = PostTrainingQuantConfig(
            backend="itex",
            quant_level=1,
            recipes={"smooth_quant": True, "smooth_quant_args": {"alpha": 0.6}},
            calibration_sampling_size=[500],
        )

        from neural_compressor.data import Datasets

        dataset = Datasets("tensorflow")["dummy"](shape=(100, 56, 56, 16), label=True)
        dataloader = DataLoader(framework="tensorflow", dataset=dataset)
        from neural_compressor import Metric

        top1 = Metric(name="topk", k=1)
        output_graph = fit(
            model=output_graph_def,
            conf=config,
            calib_dataloader=dataloader,
            eval_dataloader=dataloader,
            eval_metric=top1,
        )

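        # Two convolutions plus one MatMul are smoothed, so three scaling
        # Mul nodes are expected.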
        mul_count = 0
        for i in output_graph.graph_def.node:
            if i.op == "Mul":
                mul_count += 1

        self.assertEqual(mul_count, 3)


if __name__ == "__main__":
    unittest.main()
@@ -0,0 +1,81 @@
#
# -*- coding: utf-8 -*-
#
import unittest

import tensorflow as tf
from tensorflow.compat.v1 import graph_util

from neural_compressor import set_random_seed
from neural_compressor.adaptor.tf_utils.util import disable_random, version1_lt_version2
from neural_compressor.config import PostTrainingQuantConfig
from neural_compressor.data.dataloaders.dataloader import DataLoader
from neural_compressor.quantization import fit


class TestItexNewAPI(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        pass

    @disable_random()
    @unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.8.0"), "Requires TensorFlow 2.8.0 or newer")
    def test_itex_new_api(self):
        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
        top_relu = tf.nn.relu(x)
        paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
        x_pad = tf.pad(top_relu, paddings, "CONSTANT")
        conv_weights = tf.compat.v1.get_variable(
            "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer()
        )
        conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID")
        normed = tf.compat.v1.layers.batch_normalization(conv)

        conv_weights2 = tf.compat.v1.get_variable(
            "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer()
        )
        conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME")
        normed2 = tf.compat.v1.layers.batch_normalization(conv2)
        add = tf.raw_ops.Add(x=normed, y=normed2, name="addv2")
        relu = tf.nn.relu(add)
        relu6 = tf.nn.relu6(relu, name="op_to_store")

        out_name = relu6.name.split(":")[0]
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            output_graph_def = graph_util.convert_variables_to_constants(
                sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name]
            )

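        # quant_format="QDQ" keeps explicit QuantizeV2/Dequantize pairs in
        # the graph rather than fusing them into quantized kernels, which is
        # the form the ITEX backend consumes.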
        set_random_seed(9527)
        config = PostTrainingQuantConfig(backend="itex", quant_format="QDQ", calibration_sampling_size=[200])

        from neural_compressor.data import Datasets

        dataset = Datasets("tensorflow")["dummy"](shape=(100, 56, 56, 16), label=True)
        output_graph = fit(
            model=output_graph_def,
            conf=config,
            calib_dataloader=DataLoader(framework="tensorflow_itex", dataset=dataset, batch_size=1),
        )

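        # Count the QDQ ops to confirm the graph was rewritten in QDQ form.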
        dequant_count = 0
        quantize_count = 0
        for i in output_graph.graph_def.node:
            if i.op == "Dequantize":
                dequant_count += 1
            if i.op == "QuantizeV2":
                quantize_count += 1

        self.assertEqual(dequant_count, 5)
        self.assertEqual(quantize_count, 4)


if __name__ == "__main__":
    unittest.main()
@@ -0,0 +1,76 @@
import shutil
import unittest

import tensorflow as tf
from tensorflow.compat.v1 import graph_util

from neural_compressor.adaptor.tf_utils.util import disable_random, version1_gte_version2, version1_lt_version2


class TestConvertTensorflowQDQToOnnxQDQ(unittest.TestCase):
    @classmethod
    def setUpClass(cls):
        pass

    @classmethod
    def tearDownClass(cls):
        if version1_gte_version2(tf.version.VERSION, "2.8.0"):
            shutil.rmtree("workspace")

    @disable_random()
    @unittest.skipIf(version1_lt_version2(tf.version.VERSION, "2.8.0"), "Requires TensorFlow 2.8.0 or newer")
    def test_convert_tf_fp32_to_onnx_fp32(self):
        x = tf.compat.v1.placeholder(tf.float32, [1, 56, 56, 16], name="input")
        top_relu = tf.nn.relu(x)
        paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])
        x_pad = tf.pad(top_relu, paddings, "CONSTANT")
        conv_weights = tf.compat.v1.get_variable(
            "weight", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer()
        )
        conv = tf.nn.conv2d(x_pad, conv_weights, strides=[1, 2, 2, 1], padding="VALID")
        normed = tf.compat.v1.layers.batch_normalization(conv)

        conv_weights2 = tf.compat.v1.get_variable(
            "weight2", [3, 3, 16, 16], initializer=tf.compat.v1.random_normal_initializer()
        )
        conv2 = tf.nn.conv2d(top_relu, conv_weights2, strides=[1, 2, 2, 1], padding="SAME")
        normed2 = tf.compat.v1.layers.batch_normalization(conv2)
        add = tf.raw_ops.Add(x=normed, y=normed2, name="addv2")
        relu = tf.nn.relu(add)
        relu6 = tf.nn.relu6(relu, name="op_to_store")

        out_name = relu6.name.split(":")[0]
        with tf.compat.v1.Session() as sess:
            sess.run(tf.compat.v1.global_variables_initializer())
            output_graph_def = graph_util.convert_variables_to_constants(
                sess=sess, input_graph_def=sess.graph_def, output_node_names=[out_name]
            )

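        # Export the frozen FP32 GraphDef to ONNX through INC's TF2ONNX
        # path, then verify the result with the ONNX model checker.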
        from neural_compressor.config import TF2ONNXConfig
        from neural_compressor.model import Model

        inc_model = Model(output_graph_def)
        config = TF2ONNXConfig(dtype="fp32")
        inc_model.export("workspace/tf_fp32_to_onnx_fp32.onnx", config)

        import onnx

        onnx_model = onnx.load("workspace/tf_fp32_to_onnx_fp32.onnx")
        onnx.checker.check_model(onnx_model)

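        # End-to-end sanity check: run one dummy batch through ONNX Runtime
        # and make sure the session produces outputs.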
        import onnxruntime as ort

        from neural_compressor.data import DATALOADERS, Datasets

        ort_session = ort.InferenceSession("workspace/tf_fp32_to_onnx_fp32.onnx")
        dataset = Datasets("tensorflow")["dummy"]((100, 56, 56, 16))
        dataloader = DATALOADERS["tensorflow"](dataset)
        it = iter(dataloader)
        inputs = next(it)
        input_dict = {"input:0": inputs[0]}
        outputs = ort_session.run(None, input_dict)
        self.assertNotEqual(outputs, None)


if __name__ == "__main__":
    unittest.main()