ONNX Backends for onnxruntime1

Backend class: OnnxInferenceBackendOrt. It implements the standard onnx.backend API, illustrated by the sketch below; the script that follows runs the full ONNX backend test suite (onnx.backend.test) against it and prints a summary of the results.
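
The backend can also be called directly through the standard onnx.backend API
(prepare, run). Below is a minimal sketch assuming only that API; the tiny Add
model built with onnx.helper is a hypothetical example, not part of the test
suite.

<<<

import numpy
from onnx import TensorProto, helper
import mlprodict.onnxrt.backend_ort as backend

# Hypothetical model: a single Add node computing Y = X + X.
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [None, 2])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [None, 2])
graph = helper.make_graph(
    [helper.make_node('Add', ['X', 'X'], ['Y'])], 'example', [X], [Y])
model = helper.make_model(
    graph, opset_imports=[helper.make_opsetid('', 13)])

# prepare() loads the model, run() executes it on a list of inputs.
rep = backend.prepare(model, 'CPU')
outputs = rep.run([numpy.array([[0., 1.]], dtype=numpy.float32)])
print(outputs[0])

>>>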

<<<

import unittest
import sys
from datetime import datetime
from contextlib import redirect_stdout, redirect_stderr
from io import StringIO
from onnx.backend.test import BackendTest
from onnx import __version__ as onnx_version
from onnxruntime import __version__ as ort_version
from numpy import __version__ as npy_version
import mlprodict.onnxrt.backend_ort as backend

# Build the standard ONNX backend test suite for this backend.
back_test = BackendTest(backend, __name__)
# Keep only the CPU tests and skip the large pretrained models.
back_test.include('.*_cpu')
back_test.exclude('.*_blvc_.*')
back_test.exclude('.*_densenet_.*')
back_test.exclude('.*_densenet121_.*')
back_test.exclude('.*_inception_.*')
back_test.exclude('.*_resnet50_.*')
back_test.exclude('.*_shufflenet_.*')
back_test.exclude('.*_squeezenet_.*')
back_test.exclude('.*_vgg19_.*')
back_test.exclude('.*_zfnet512_.*')
# Inject the generated test cases so unittest can discover them.
globals().update(back_test.enable_report().test_cases)

# Print the environment used for this run.
print('---------------------------------')
print('python', sys.version)
print('onnx', onnx_version)
print('onnxruntime', ort_version)
print('numpy', npy_version)
print('---------------------------------')
print(datetime.now(), "BEGIN")
print('---------------------------------')

# Capture the verbose unittest output so it can be filtered below.
buffer = StringIO()
with redirect_stdout(buffer), redirect_stderr(buffer):
    res = unittest.main(verbosity=2, exit=False)

# Summary statistics from the test result.
testsRun = res.result.testsRun
errors = len(res.result.errors)
skipped = len(res.result.skipped)
unexpectedSuccesses = len(res.result.unexpectedSuccesses)
expectedFailures = len(res.result.expectedFailures)

print('---------------------------------')
print(datetime.now(), "END")
print('---------------------------------')

print("testsRun=%d errors=%d skipped=%d" % (testsRun, errors, skipped))
print("unexpectedSuccesses=%d expectedFailures=%d" % (
    unexpectedSuccesses, expectedFailures))
ran = testsRun - skipped
# Success ratio over the tests which actually ran:
# ratio = 1 - errors / (testsRun - skipped).
print("ratio=%f" % (1 - errors * 1.0 / ran))
print('---------------------------------')
# Drop the lines reporting tests skipped by the include pattern.
lines = buffer.getvalue().split('\n')
print("\n".join(line for line in lines
      if "skipped 'no matched include pattern'" not in line))

>>>

    ---------------------------------
    python 3.9.1 (default, Jan 18 2021, 16:35:58) 
    [GCC 8.3.0]
    onnx 1.11.0
    onnxruntime 1.11.0
    numpy 1.21.5
    ---------------------------------
    2022-04-05 07:13:34.050304 BEGIN
    ---------------------------------
    ---------------------------------
    2022-04-05 07:14:19.831660 END
    ---------------------------------
    testsRun=2026 errors=83 skipped=1021
    unexpectedSuccesses=0 expectedFailures=0
    ratio=0.917413
    ---------------------------------
    test_abs_cpu (__main__.OnnxBackendNodeModelTest) ... /var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/npy/xop.py:16: DeprecationWarning: Please use `coo_matrix` from the `scipy.sparse` namespace, the `scipy.sparse.coo` namespace is deprecated.
      from scipy.sparse.coo import coo_matrix
    /usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py:188: DeprecationWarning: `np.object` is a deprecated alias for the builtin `object`. To silence this warning, use `object` by itself. Doing this will not modify any behavior and is safe. 
    Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
      if ref_outputs[i].dtype == np.object:
    ok
    test_acos_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_acos_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_acosh_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_acosh_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_adagrad_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_adagrad_multiple_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_adam_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_adam_multiple_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_add_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_add_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_add_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_and2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_and3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_and4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_and_bcast3v1d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_and_bcast3v2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_and_bcast4v2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_and_bcast4v3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_and_bcast4v4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_default_axis_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_default_axis_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_default_axis_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_default_axis_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_keepdims_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_keepdims_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_negative_axis_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_negative_axis_keepdims_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_negative_axis_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_negative_axis_keepdims_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_no_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_no_keepdims_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_no_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmax_no_keepdims_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_default_axis_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_default_axis_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_default_axis_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_default_axis_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_keepdims_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_keepdims_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_negative_axis_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_negative_axis_keepdims_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_negative_axis_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_negative_axis_keepdims_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_no_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_no_keepdims_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_no_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_argmin_no_keepdims_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_asin_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_asin_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_asinh_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_asinh_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_atan_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_atan_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_atanh_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_atanh_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_averagepool_1d_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_averagepool_2d_ceil_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_averagepool_2d_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_averagepool_2d_pads_count_include_pad_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_averagepool_2d_pads_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_averagepool_2d_precomputed_pads_count_include_pad_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_averagepool_2d_precomputed_pads_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_averagepool_2d_precomputed_same_upper_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_averagepool_2d_precomputed_strides_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_averagepool_2d_same_lower_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_averagepool_2d_same_upper_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_averagepool_2d_strides_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_averagepool_3d_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_basic_conv_with_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_basic_conv_without_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_basic_convinteger_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_batchnorm_epsilon_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_batchnorm_epsilon_training_mode_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_batchnorm_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_batchnorm_example_training_mode_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_bernoulli_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_bernoulli_double_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_bernoulli_double_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_bernoulli_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_bernoulli_seed_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_bernoulli_seed_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_bitshift_left_uint16_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_bitshift_left_uint32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_bitshift_left_uint64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_bitshift_left_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_bitshift_right_uint16_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_bitshift_right_uint32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_bitshift_right_uint64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_bitshift_right_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cast_BFLOAT16_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_cast_DOUBLE_to_FLOAT16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cast_DOUBLE_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cast_FLOAT16_to_DOUBLE_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cast_FLOAT16_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cast_FLOAT_to_BFLOAT16_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_cast_FLOAT_to_DOUBLE_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cast_FLOAT_to_FLOAT16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cast_FLOAT_to_STRING_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_cast_STRING_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_castlike_BFLOAT16_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_castlike_BFLOAT16_to_FLOAT_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_castlike_DOUBLE_to_FLOAT16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_castlike_DOUBLE_to_FLOAT16_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_castlike_DOUBLE_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_castlike_DOUBLE_to_FLOAT_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_castlike_FLOAT16_to_DOUBLE_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_castlike_FLOAT16_to_DOUBLE_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_castlike_FLOAT16_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_castlike_FLOAT16_to_FLOAT_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_castlike_FLOAT_to_BFLOAT16_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_castlike_FLOAT_to_BFLOAT16_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_castlike_FLOAT_to_DOUBLE_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_castlike_FLOAT_to_DOUBLE_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_castlike_FLOAT_to_FLOAT16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_castlike_FLOAT_to_FLOAT16_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_castlike_FLOAT_to_STRING_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_castlike_FLOAT_to_STRING_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_castlike_STRING_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_castlike_STRING_to_FLOAT_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_ceil_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_ceil_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_celu_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_celu_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_clip_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_clip_default_inbounds_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_clip_default_int8_inbounds_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_clip_default_int8_max_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_clip_default_int8_min_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_clip_default_max_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_clip_default_min_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_clip_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_clip_inbounds_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_clip_outbounds_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_clip_splitbounds_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_compress_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_compress_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_compress_default_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_compress_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_concat_1d_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_concat_1d_axis_negative_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_concat_2d_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_concat_2d_axis_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_concat_2d_axis_negative_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_concat_2d_axis_negative_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_concat_3d_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_concat_3d_axis_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_concat_3d_axis_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_concat_3d_axis_negative_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_concat_3d_axis_negative_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_concat_3d_axis_negative_3_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_constant_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_constant_pad_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_constantofshape_float_ones_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_constantofshape_int_shape_zero_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_constantofshape_int_zeros_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_conv_with_autopad_same_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_conv_with_strides_and_asymmetric_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_conv_with_strides_no_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_conv_with_strides_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_convinteger_with_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_convinteger_without_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_convtranspose_1d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_convtranspose_3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_convtranspose_autopad_same_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_convtranspose_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_convtranspose_dilations_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_convtranspose_kernel_shape_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_convtranspose_output_shape_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_convtranspose_pad_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_convtranspose_pads_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_convtranspose_with_kernel_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cos_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cos_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cosh_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cosh_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cumsum_1d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cumsum_1d_exclusive_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cumsum_1d_reverse_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cumsum_1d_reverse_exclusive_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cumsum_2d_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cumsum_2d_axis_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_cumsum_2d_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_depthtospace_crd_mode_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_depthtospace_crd_mode_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_depthtospace_dcr_mode_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_depthtospace_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_dequantizelinear_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_dequantizelinear_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_det_2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_det_nd_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_div_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_div_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_div_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_div_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_dropout_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_dropout_default_mask_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_dropout_default_mask_ratio_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_dropout_default_old_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_dropout_default_ratio_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_dropout_random_old_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_dynamicquantizelinear_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_dynamicquantizelinear_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_dynamicquantizelinear_max_adjusted_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_dynamicquantizelinear_max_adjusted_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_dynamicquantizelinear_min_adjusted_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_dynamicquantizelinear_min_adjusted_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_edge_pad_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_einsum_batch_diagonal_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_einsum_batch_matmul_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_einsum_inner_prod_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_einsum_sum_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_einsum_transpose_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_elu_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_elu_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_elu_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_equal_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_equal_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_erf_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_exp_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_exp_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_expand_dim_changed_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_expand_dim_unchanged_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_eyelike_populate_off_main_diagonal_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_eyelike_with_dtype_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_eyelike_without_dtype_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_flatten_axis0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_flatten_axis1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_flatten_axis2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_flatten_axis3_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_flatten_default_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_flatten_negative_axis1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_flatten_negative_axis2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_flatten_negative_axis3_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_flatten_negative_axis4_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_floor_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_floor_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gather_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gather_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gather_2d_indices_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gather_elements_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gather_elements_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gather_elements_negative_indices_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gather_negative_indices_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gathernd_example_float32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gathernd_example_int32_batch_dim1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gathernd_example_int32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gemm_all_attributes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gemm_alpha_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gemm_beta_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gemm_default_matrix_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gemm_default_no_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gemm_default_scalar_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gemm_default_single_elem_vector_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gemm_default_vector_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gemm_default_zero_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gemm_transposeA_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gemm_transposeB_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_globalaveragepool_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_globalaveragepool_precomputed_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_globalmaxpool_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_globalmaxpool_precomputed_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_greater_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_greater_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_greater_equal_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_greater_equal_bcast_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_greater_equal_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_greater_equal_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gridsample_aligncorners_true_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gridsample_bicubic_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gridsample_bilinear_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gridsample_border_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gridsample_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gridsample_nearest_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gridsample_reflection_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gridsample_zeros_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gru_batchwise_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_gru_defaults_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gru_seq_length_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_gru_with_initial_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_hardmax_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_hardmax_axis_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_hardmax_axis_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_hardmax_default_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_hardmax_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_hardmax_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_hardmax_one_hot_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_hardsigmoid_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_hardsigmoid_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_hardsigmoid_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_hardswish_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_hardswish_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_identity_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_identity_opt_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_identity_sequence_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_if_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_if_opt_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_if_seq_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_instancenorm_epsilon_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_instancenorm_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_isinf_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_isinf_negative_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_isinf_positive_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_isnan_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_leakyrelu_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_leakyrelu_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_leakyrelu_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_less_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_less_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_less_equal_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_less_equal_bcast_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_less_equal_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_less_equal_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_log_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_log_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_logsoftmax_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_logsoftmax_axis_0_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_logsoftmax_axis_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_logsoftmax_axis_1_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_logsoftmax_axis_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_logsoftmax_axis_2_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_logsoftmax_default_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_logsoftmax_default_axis_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_logsoftmax_example_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_logsoftmax_example_1_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_logsoftmax_large_number_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_logsoftmax_large_number_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_logsoftmax_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_logsoftmax_negative_axis_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_loop11_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_loop13_seq_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_loop16_seq_none_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_lrn_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_lrn_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_lstm_batchwise_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_lstm_defaults_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_lstm_with_initial_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_lstm_with_peepholes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_matmul_2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_matmul_3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_matmul_4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_matmulinteger_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_max_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_max_float16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_max_float32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_max_float64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_max_int16_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_max_int32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_max_int64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_max_int8_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_max_one_input_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_max_two_inputs_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_max_uint16_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_max_uint32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_max_uint64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_max_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_maxpool_1d_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxpool_2d_ceil_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxpool_2d_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxpool_2d_dilations_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxpool_2d_pads_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxpool_2d_precomputed_pads_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxpool_2d_precomputed_same_upper_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxpool_2d_precomputed_strides_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxpool_2d_same_lower_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxpool_2d_same_upper_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxpool_2d_strides_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxpool_2d_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxpool_3d_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxpool_with_argmax_2d_precomputed_pads_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxpool_with_argmax_2d_precomputed_strides_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_maxunpool_export_with_output_shape_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_maxunpool_export_without_output_shape_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mean_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mean_one_input_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mean_two_inputs_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_min_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_min_float16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_min_float32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_min_float64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_min_int16_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_min_int32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_min_int64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_min_int8_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_min_one_input_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_min_two_inputs_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_min_uint16_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_min_uint32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_min_uint64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_min_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_mod_broadcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mod_int64_fmod_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mod_mixed_sign_float16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mod_mixed_sign_float32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mod_mixed_sign_float64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mod_mixed_sign_int16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mod_mixed_sign_int32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mod_mixed_sign_int64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mod_mixed_sign_int8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mod_uint16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mod_uint32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mod_uint64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mod_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_momentum_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_momentum_multiple_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_mul_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mul_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mul_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mul_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_mvn_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_mvn_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_neg_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nesterov_momentum_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_nllloss_NC_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NC_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1_mean_weight_negative_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1_mean_weight_negative_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1_weight_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1_weight_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1_weight_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_no_weight_reduction_mean_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_no_weight_reduction_mean_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_reduction_mean_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_reduction_mean_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_reduction_sum_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_reduction_sum_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_with_weight_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_with_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_with_weight_reduction_mean_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_with_weight_reduction_mean_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_with_weight_reduction_sum_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_with_weight_reduction_sum_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_with_weight_reduction_sum_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2_with_weight_reduction_sum_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2d3_none_no_weight_negative_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2d3_none_no_weight_negative_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2d3_sum_weight_high_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2d3_sum_weight_high_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2d3d4d5_mean_weight_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2d3d4d5_mean_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2d3d4d5_none_no_weight_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nllloss_NCd1d2d3d4d5_none_no_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nonmaxsuppression_center_point_box_format_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nonmaxsuppression_flipped_coordinates_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nonmaxsuppression_identical_boxes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nonmaxsuppression_limit_output_size_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nonmaxsuppression_single_box_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nonmaxsuppression_suppress_by_IOU_and_scores_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nonmaxsuppression_suppress_by_IOU_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nonmaxsuppression_two_batches_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nonmaxsuppression_two_classes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_nonzero_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_not_2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_not_3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_not_4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_onehot_negative_indices_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_onehot_with_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_onehot_with_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_onehot_without_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_optional_get_element_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_optional_get_element_sequence_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_optional_has_element_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_optional_has_element_empty_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_or2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_or3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_or4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_or_bcast3v1d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_or_bcast3v2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_or_bcast4v2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_or_bcast4v3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_or_bcast4v4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_pow_bcast_array_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_pow_bcast_scalar_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_pow_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_pow_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_pow_types_float32_int32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_pow_types_float32_int64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_pow_types_float32_uint32_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_pow_types_float32_uint64_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_pow_types_float_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_pow_types_int32_float32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_pow_types_int32_int32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_pow_types_int64_float32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_pow_types_int64_int64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_pow_types_int_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_prelu_broadcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_prelu_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_qlinearconv_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_qlinearmatmul_2D_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_qlinearmatmul_3D_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_quantizelinear_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_quantizelinear_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_range_float_type_positive_delta_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_range_float_type_positive_delta_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_range_int32_type_negative_delta_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_range_int32_type_negative_delta_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reciprocal_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reciprocal_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l1_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l1_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l1_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l1_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l1_keep_dims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l1_keep_dims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l1_negative_axes_keep_dims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l1_negative_axes_keep_dims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l2_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l2_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l2_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l2_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l2_keep_dims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l2_keep_dims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l2_negative_axes_keep_dims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_l2_negative_axes_keep_dims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_log_sum_asc_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_log_sum_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_log_sum_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_log_sum_desc_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_log_sum_exp_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_log_sum_exp_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_log_sum_exp_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_log_sum_exp_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_log_sum_exp_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_log_sum_exp_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_log_sum_exp_negative_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_log_sum_exp_negative_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_log_sum_negative_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_max_default_axes_keepdim_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_max_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_max_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_max_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_max_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_max_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_max_negative_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_max_negative_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_mean_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_mean_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_mean_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_mean_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_mean_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_mean_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_mean_negative_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_mean_negative_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_min_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_min_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_min_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_min_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_min_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_min_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_min_negative_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_min_negative_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_prod_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_prod_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_prod_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_prod_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_prod_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_prod_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_prod_negative_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_prod_negative_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_empty_axes_input_noop_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_empty_axes_input_noop_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_negative_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_negative_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_square_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_square_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_square_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_square_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_square_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_square_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_square_negative_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reduce_sum_square_negative_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reflect_pad_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_relu_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reshape_allowzero_reordered_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_reshape_extended_dims_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reshape_negative_dim_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reshape_negative_extended_dims_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reshape_one_dim_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reshape_reduced_dims_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reshape_reordered_all_dims_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reshape_reordered_last_dims_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reshape_zero_and_negative_dim_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reshape_zero_dim_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_downsample_scales_cubic_align_corners_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_resize_downsample_scales_cubic_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_downsample_scales_linear_align_corners_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_resize_downsample_scales_linear_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_downsample_scales_nearest_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_downsample_sizes_cubic_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_downsample_sizes_nearest_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_downsample_sizes_nearest_tf_half_pixel_for_nn_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_tf_crop_and_resize_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_upsample_scales_cubic_align_corners_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_upsample_scales_cubic_asymmetric_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_upsample_scales_cubic_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_upsample_scales_linear_align_corners_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_upsample_scales_linear_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_upsample_scales_nearest_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_upsample_sizes_cubic_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_upsample_sizes_nearest_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_upsample_sizes_nearest_floor_align_corners_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reversesequence_batch_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_reversesequence_time_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_rnn_seq_length_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_roialign_aligned_false_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_roialign_aligned_true_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_round_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_scan9_sum_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_scan_sum_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_scatter_elements_with_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_scatter_elements_with_duplicate_indices_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_scatter_elements_with_negative_indices_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_scatter_elements_without_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_scatter_with_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_scatter_without_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_scatternd_add_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_scatternd_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_scatternd_multiply_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1_mean_weight_negative_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1_mean_weight_negative_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1_mean_weight_negative_ii_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1_mean_weight_negative_ii_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3_none_no_weight_negative_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3_none_no_weight_negative_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3_sum_weight_high_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3_sum_weight_high_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3_sum_weight_high_ii_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3_sum_weight_high_ii_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3d4d5_mean_weight_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3d4d5_mean_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3d4d5_mean_weight_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3d4d5_mean_weight_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3d4d5_none_no_weight_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3d4d5_none_no_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_3d_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_3d_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_3d_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_no_weight_ii_3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_no_weight_ii_3d_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_no_weight_ii_3d_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_no_weight_ii_3d_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_no_weight_ii_4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_no_weight_ii_4d_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_no_weight_ii_4d_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_no_weight_ii_4d_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_no_weight_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_no_weight_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_no_weight_ii_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_no_weight_ii_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_ii_3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_ii_3d_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_ii_3d_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_ii_3d_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_ii_4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_ii_4d_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_ii_4d_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_ii_4d_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_ii_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_ii_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_mean_weight_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_none_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_none_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_none_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_none_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_none_weights_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_none_weights_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_none_weights_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_none_weights_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_sum_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_sum_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_sum_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sce_sum_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_selu_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_selu_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_selu_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sequence_insert_at_back_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_sequence_insert_at_front_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_shape_clip_end_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_shape_clip_start_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_shape_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_shape_end_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_shape_end_negative_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_shape_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_shape_start_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_shape_start_1_end_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_shape_start_1_end_negative_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_shape_start_negative_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_shrink_hard_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_shrink_soft_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sigmoid_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sigmoid_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sign_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_simple_rnn_batchwise_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_simple_rnn_defaults_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_simple_rnn_with_initial_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sin_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sin_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sinh_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sinh_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_size_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_size_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_slice_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_slice_default_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_slice_default_steps_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_slice_end_out_of_bounds_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_slice_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_slice_neg_steps_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_slice_negative_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_slice_start_out_of_bounds_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_softmax_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softmax_axis_0_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softmax_axis_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softmax_axis_1_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softmax_axis_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softmax_axis_2_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softmax_default_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softmax_default_axis_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softmax_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softmax_example_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softmax_large_number_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softmax_large_number_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softmax_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softmax_negative_axis_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softplus_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softplus_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softsign_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_softsign_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_spacetodepth_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_spacetodepth_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_split_equal_parts_1d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_split_equal_parts_2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_split_equal_parts_default_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_split_variable_parts_1d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_split_variable_parts_2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_split_variable_parts_default_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_split_zero_size_splits_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sqrt_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sqrt_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_squeeze_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_squeeze_negative_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_strnormalizer_export_monday_casesensintive_lower_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_strnormalizer_export_monday_casesensintive_nochangecase_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_strnormalizer_export_monday_casesensintive_upper_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_strnormalizer_export_monday_empty_output_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_strnormalizer_export_monday_insensintive_upper_twodim_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_strnormalizer_nostopwords_nochangecase_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sub_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sub_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sub_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sub_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_sum_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sum_one_input_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_sum_two_inputs_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tan_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tan_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tanh_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tanh_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tfidfvectorizer_tf_batch_onlybigrams_skip0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tfidfvectorizer_tf_batch_onlybigrams_skip5_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tfidfvectorizer_tf_batch_uniandbigrams_skip5_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tfidfvectorizer_tf_only_bigrams_skip0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tfidfvectorizer_tf_onlybigrams_levelempty_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tfidfvectorizer_tf_onlybigrams_skip5_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tfidfvectorizer_tf_uniandbigrams_skip5_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_thresholdedrelu_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_thresholdedrelu_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_thresholdedrelu_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tile_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tile_precomputed_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_top_k_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_top_k_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_top_k_smallest_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_training_dropout_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_training_dropout_default_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_training_dropout_default_mask_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_training_dropout_mask_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
    test_training_dropout_zero_ratio_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_training_dropout_zero_ratio_mask_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_transpose_all_permutations_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_transpose_all_permutations_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_transpose_all_permutations_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_transpose_all_permutations_3_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_transpose_all_permutations_4_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_transpose_all_permutations_5_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_transpose_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tril_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tril_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tril_one_row_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tril_out_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tril_out_pos_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tril_pos_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tril_square_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tril_square_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_tril_zero_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_triu_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_triu_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_triu_one_row_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_triu_out_neg_out_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_triu_out_pos_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_triu_pos_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_triu_square_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_triu_square_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_triu_zero_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
    test_unique_not_sorted_without_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_unique_sorted_with_axis_3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_unique_sorted_with_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_unique_sorted_with_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_unique_sorted_without_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_unsqueeze_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_unsqueeze_axis_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_unsqueeze_axis_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_unsqueeze_axis_3_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_unsqueeze_negative_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_unsqueeze_three_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_unsqueeze_two_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_unsqueeze_unsorted_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_upsample_nearest_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_where_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_where_long_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_xor2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_xor3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_xor4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_xor_bcast3v1d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_xor_bcast3v2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_xor_bcast4v2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_xor_bcast4v3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_xor_bcast4v4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
    test_AvgPool1d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_AvgPool1d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_AvgPool2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_AvgPool2d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_AvgPool3d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_AvgPool3d_stride1_pad0_gpu_input_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_AvgPool3d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_BatchNorm1d_3d_input_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_BatchNorm2d_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_BatchNorm2d_momentum_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_BatchNorm3d_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_BatchNorm3d_momentum_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_ConstantPad2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv1d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv1d_dilated_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv1d_groups_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv1d_pad1_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv1d_pad1size1_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv1d_pad2_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv1d_pad2size1_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv1d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv2d_depthwise_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv2d_depthwise_padded_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv2d_depthwise_strided_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv2d_depthwise_with_multiplier_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv2d_dilated_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv2d_groups_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv2d_groups_thnn_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv2d_no_bias_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv2d_padding_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv2d_strided_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv3d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv3d_dilated_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv3d_dilated_strided_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv3d_groups_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv3d_no_bias_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv3d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Conv3d_stride_padding_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_ConvTranspose2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_ConvTranspose2d_no_bias_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_ELU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Embedding_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Embedding_sparse_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_GLU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_GLU_dim_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_LeakyReLU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_LeakyReLU_with_negval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Linear_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_Linear_no_bias_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_LogSoftmax_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_MaxPool1d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_MaxPool1d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_MaxPool1d_stride_padding_dilation_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_MaxPool2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_MaxPool2d_stride_padding_dilation_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_MaxPool3d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_MaxPool3d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_MaxPool3d_stride_padding_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_PReLU_1d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_PReLU_1d_multiparam_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_PReLU_2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_PReLU_2d_multiparam_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_PReLU_3d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_PReLU_3d_multiparam_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_PixelShuffle_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_PoissonNLLLLoss_no_reduce_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_ReLU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_ReflectionPad2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_ReplicationPad2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_SELU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Sigmoid_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Softmax_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Softmin_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Softplus_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_Softsign_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
    test_Tanh_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_ZeroPad2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_log_softmax_dim3_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_log_softmax_lastdim_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_softmax_functional_dim3_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_softmax_lastdim_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
    test_operator_add_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
    test_operator_add_size1_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
    test_operator_add_size1_right_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
    test_operator_add_size1_singleton_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
    test_operator_addconstant_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
    test_operator_addmm_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
    test_operator_basic_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
    test_operator_chunk_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_clip_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_concat2_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_conv_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_convtranspose_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_exp_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_flatten_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_index_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_max_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_maxpool_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_min_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_mm_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
    test_operator_non_float_params_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
    test_operator_pad_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_params_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
    test_operator_permute2_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_pow_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
    test_operator_reduced_mean_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_reduced_mean_keepdim_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_reduced_sum_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_reduced_sum_keepdim_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_repeat_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_repeat_dim_overflow_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_selu_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_sqrt_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_symbolic_override_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_symbolic_override_nested_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_operator_view_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ok
    test_bvlc_alexnet_cpu (__main__.OnnxBackendRealModelTest) ... ok
    test_densenet121_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_densenet121_.*"'
    test_densenet121_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_densenet121_.*"'
    test_inception_v1_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_inception_.*"'
    test_inception_v1_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_inception_.*"'
    test_inception_v2_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_inception_.*"'
    test_inception_v2_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_inception_.*"'
    test_resnet50_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_resnet50_.*"'
    test_resnet50_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_resnet50_.*"'
    test_shufflenet_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_shufflenet_.*"'
    test_shufflenet_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_shufflenet_.*"'
    test_squeezenet_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_squeezenet_.*"'
    test_squeezenet_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_squeezenet_.*"'
    test_vgg19_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_vgg19_.*"'
    test_vgg19_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_vgg19_.*"'
    test_zfnet512_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_zfnet512_.*"'
    test_zfnet512_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_zfnet512_.*"'
    test_expand_shape_model1_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_expand_shape_model2_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_expand_shape_model3_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_expand_shape_model4_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_gradient_of_add_and_mul_cpu (__main__.OnnxBackendSimpleModelTest) ... ERROR
    test_gradient_of_add_cpu (__main__.OnnxBackendSimpleModelTest) ... ERROR
    test_sequence_model1_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_sequence_model2_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_sequence_model3_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_sequence_model4_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_sequence_model5_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_sequence_model6_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_sequence_model7_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_sequence_model8_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_shrink_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_sign_model_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_single_relu_model_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_strnorm_model_monday_casesensintive_lower_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_strnorm_model_monday_casesensintive_nochangecase_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_strnorm_model_monday_casesensintive_upper_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_strnorm_model_monday_empty_output_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_strnorm_model_monday_insensintive_upper_twodim_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
    test_strnorm_model_nostopwords_nochangecase_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
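
A note before the detailed reports: unittest marks a test FAIL when an assertion fails (here, computed outputs that do not match the stored reference values) and ERROR when an exception is raised (here, almost always a failure to create the InferenceSession). The four training-dropout FAIL entries are presumably of the first kind: with training_mode enabled, Dropout draws a random mask, so a correct runtime can still produce values that differ from the reference unless both sides share the same random generator and seed. Consistently with that reading, the zero-ratio variants, whose output is deterministic, pass. The sketch below only illustrates the randomness; it is a hypothetical minimal graph, not the test's actual model.

<<<

import numpy as np
from onnx import TensorProto, helper
from onnxruntime import InferenceSession

# Hypothetical Dropout graph in training mode: surviving values are
# scaled by 1/(1-ratio) and the dropped positions depend on the
# runtime's RNG, so two correct runtimes may disagree element-wise.
node = helper.make_node('Dropout', ['x', 'ratio', 'training'], ['y'], seed=0)
graph = helper.make_graph(
    [node], 'dropout_training',
    [helper.make_tensor_value_info('x', TensorProto.FLOAT, [5]),
     helper.make_tensor_value_info('ratio', TensorProto.FLOAT, []),
     helper.make_tensor_value_info('training', TensorProto.BOOL, [])],
    [helper.make_tensor_value_info('y', TensorProto.FLOAT, [5])])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 13)])
sess = InferenceSession(model.SerializeToString(),
                        providers=['CPUExecutionProvider'])
print(sess.run(None, {'x': np.arange(5, dtype=np.float32),
                      'ratio': np.array(0.5, dtype=np.float32),
                      'training': np.array(True)}))
>>>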
    
    ======================================================================
    ERROR: test_adagrad_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 372, in _create_inference_session
        sess = C.InferenceSession(session_options, self._model_bytes, False, self._read_config_from_model)
    onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Fatal error: Adagrad is not a registered function/op
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 1 : FAIL : Fatal error: Adagrad is not a registered function/op'
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "R"
        input: "T"
        input: "X"
        input: "G"
        input: "H"
        output: "X_new"
        output: "H_new"
        op_type: "Adagrad"
        attribute {
          name: "decay_factor"
          f: 0.10000000149011612
          type: FLOAT
        }
        attribute {
          name: "epsilon"
          f: 9.999999747378752e-06
          type: FLOAT
        }
        attribute {
          name: "norm_coefficient"
          f: 0.0010000000474974513
          type: FLOAT
        }
        domain
    [...]
        dim {
                dim_value: 1
              }
            }
          }
        }
      }
      output {
        name: "X_new"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 1
              }
            }
          }
        }
      }
      output {
        name: "H_new"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 1
              }
            }
          }
        }
      }
    }
    opset_import {
      domain: "ai.onnx.preview.training"
      version: 1
    }
    .
    
    ======================================================================
    ERROR: test_adagrad_multiple_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 372, in _create_inference_session
        sess = C.InferenceSession(session_options, self._model_bytes, False, self._read_config_from_model)
    onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Fatal error: Adagrad is not a registered function/op
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 1 : FAIL : Fatal error: Adagrad is not a registered function/op'
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "R"
        input: "T"
        input: "X1"
        input: "X2"
        input: "G1"
        input: "G2"
        input: "H1"
        input: "H2"
        output: "X1_new"
        output: "X2_new"
        output: "H1_new"
        output: "H2_new"
        op_type: "Adagrad"
        attribute {
          name: "decay_factor"
          f: 0.10000000149011612
          type: FLOAT
        }
        attribute {
          name: "epsilon"
          f: 9.999999747378752e-06
          type: FLOAT
        }
        attribute {
     
    [...]
      dim {
                dim_value: 2
              }
            }
          }
        }
      }
      output {
        name: "H1_new"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 1
              }
            }
          }
        }
      }
      output {
        name: "H2_new"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
    }
    opset_import {
      domain: "ai.onnx.preview.training"
      version: 1
    }
    .
    
    ======================================================================
    ERROR: test_adam_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 372, in _create_inference_session
        sess = C.InferenceSession(session_options, self._model_bytes, False, self._read_config_from_model)
    onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Fatal error: Adam is not a registered function/op
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 1 : FAIL : Fatal error: Adam is not a registered function/op'
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "R"
        input: "T"
        input: "X"
        input: "G"
        input: "V"
        input: "H"
        output: "X_new"
        output: "V_new"
        output: "H_new"
        op_type: "Adam"
        attribute {
          name: "alpha"
          f: 0.949999988079071
          type: FLOAT
        }
        attribute {
          name: "beta"
          f: 0.10000000149011612
          type: FLOAT
        }
        attribute {
          name: "epsilon"
          f: 1.0000000116860974e-07
          type: FLOAT
        }
    
    [...]
        dim {
                dim_value: 2
              }
            }
          }
        }
      }
      output {
        name: "V_new"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
      output {
        name: "H_new"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
    }
    opset_import {
      domain: "ai.onnx.preview.training"
      version: 1
    }
    .
    
    ======================================================================
    ERROR: test_adam_multiple_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 372, in _create_inference_session
        sess = C.InferenceSession(session_options, self._model_bytes, False, self._read_config_from_model)
    onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Fatal error: Adam is not a registered function/op
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 1 : FAIL : Fatal error: Adam is not a registered function/op'
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "R"
        input: "T"
        input: "X1"
        input: "X2"
        input: "G1"
        input: "G2"
        input: "V1"
        input: "V2"
        input: "H1"
        input: "H2"
        output: "X1_new"
        output: "X2_new"
        output: "V1_new"
        output: "V2_new"
        output: "H1_new"
        output: "H2_new"
        op_type: "Adam"
        attribute {
          name: "alpha"
          f: 0.949999988079071
          type: FLOAT
        }
        attribute {
          name: "beta"
          f: 0.85
    [...]
      dim {
                dim_value: 2
              }
            }
          }
        }
      }
      output {
        name: "H1_new"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 1
              }
            }
          }
        }
      }
      output {
        name: "H2_new"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
    }
    opset_import {
      domain: "ai.onnx.preview.training"
      version: 1
    }
    .
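
The four errors above (test_adagrad_cpu, test_adagrad_multiple_cpu, test_adam_cpu, test_adam_multiple_cpu) share one cause: the models import the experimental ai.onnx.preview.training domain, and the standard inference build of onnxruntime does not register Adagrad or Adam, so session creation fails before anything runs. Such models could be filtered out upfront by inspecting the declared opsets, as in the sketch below; uses_training_ops is a name invented for this example and 'model.onnx' stands for any test model.

<<<

import onnx

TRAINING_DOMAIN = 'ai.onnx.preview.training'

def uses_training_ops(model):
    # A model importing the preview training domain cannot be loaded
    # by a plain inference build of onnxruntime.
    return any(op.domain == TRAINING_DOMAIN for op in model.opset_import)

model = onnx.load('model.onnx')
if uses_training_ops(model):
    print('skip: requires training operators')
>>>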
    
    ======================================================================
    ERROR: test_add_uint8_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(14) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(14) node with name '''
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "x"
        input: "y"
        output: "sum"
        op_type: "Add"
      }
      name: "test_add_uint8"
      input {
        name: "x"
        type {
          tensor_type {
            elem_type: 2
            shape {
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
      input {
        name: "y"
        type {
          tensor_type {
            e
    [...]
            dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
      output {
        name: "sum"
        type {
          tensor_type {
            elem_type: 2
            shape {
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 14
    }
    .
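
In this model elem_type: 2 is UINT8: the CPU execution provider has no uint8 kernel for Add at opset 14, so initialization fails with NOT_IMPLEMENTED before anything is executed (test_sub_uint8_cpu fails the same way). If running such a model mattered, one plausible workaround, sketched below as an assumption rather than anything the backend does, is to rebuild the graph with casts around the unsupported node.

<<<

from onnx import TensorProto, helper

# Hypothetical rewrite of the failing graph: compute the uint8 Add
# in int32, which the CPU provider implements, then cast back.
# Overflow behaviour may differ from native uint8 arithmetic.
nodes = [
    helper.make_node('Cast', ['x'], ['x32'], to=TensorProto.INT32),
    helper.make_node('Cast', ['y'], ['y32'], to=TensorProto.INT32),
    helper.make_node('Add', ['x32', 'y32'], ['sum32']),
    helper.make_node('Cast', ['sum32'], ['sum'], to=TensorProto.UINT8),
]
graph = helper.make_graph(
    nodes, 'add_uint8_workaround',
    [helper.make_tensor_value_info('x', TensorProto.UINT8, [3, 4, 5]),
     helper.make_tensor_value_info('y', TensorProto.UINT8, [3, 4, 5])],
    [helper.make_tensor_value_info('sum', TensorProto.UINT8, [3, 4, 5])])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 14)])
>>>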
    
    ======================================================================
    ERROR: test_bitshift_left_uint16_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for BitShift(11) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for BitShift(11) node with name '''
    ir_version: 5
    producer_name: "backend-test"
    graph {
      node {
        input: "x"
        input: "y"
        output: "z"
        op_type: "BitShift"
        attribute {
          name: "direction"
          s: "LEFT"
          type: STRING
        }
      }
      name: "test_bitshift_left_uint16"
      input {
        name: "x"
        type {
          tensor_type {
            elem_type: 4
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      input {
        name: "y"
        type {
          tensor_type {
            elem_type: 4
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "z"
        type {
          tensor_type {
            elem_type: 4
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 11
    }
    .
    
    ======================================================================
    ERROR: test_bitshift_right_uint16_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for BitShift(11) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for BitShift(11) node with name '''
    ir_version: 5
    producer_name: "backend-test"
    graph {
      node {
        input: "x"
        input: "y"
        output: "z"
        op_type: "BitShift"
        attribute {
          name: "direction"
          s: "RIGHT"
          type: STRING
        }
      }
      name: "test_bitshift_right_uint16"
      input {
        name: "x"
        type {
          tensor_type {
            elem_type: 4
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      input {
        name: "y"
        type {
          tensor_type {
            elem_type: 4
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "z"
        type {
          tensor_type {
            elem_type: 4
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 11
    }
    .
    
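The two BitShift failures above are kernel gaps in onnxruntime itself, not in the backend wrapper: onnxruntime 1.11 apparently registers no CPU implementation of BitShift(11) for uint16 inputs (elem_type 4), even though the operator specification allows that type. A minimal sketch, assuming only numpy, of the same computation; casting to uint32 before the node and back afterwards would be one possible workaround.

<<<

# Hedged sketch (not part of the test suite): numpy equivalents of the
# two failing BitShift nodes on uint16 inputs.
import numpy as np

x = np.array([16, 4, 1], dtype=np.uint16)
y = np.array([1, 2, 3], dtype=np.uint16)
print(np.left_shift(x, y))   # BitShift with direction='LEFT'
print(np.right_shift(x, y))  # BitShift with direction='RIGHT'

>>>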
    ======================================================================
    ERROR: test_cast_BFLOAT16_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
        self.graph_ = self.to_sequence(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
        variables[obj.name] = _var_as_dict(obj)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 362, in _var_as_dict
        elem_type = _elem_type_as_str(t.elem_type)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 292, in _elem_type_as_str
        raise NotImplementedError(  # pragma: no cover
    NotImplementedError: elem_type '16' is unknown
    fields:
    ['__abs__',
     '__add__',
     '__and__',
     '__bool__',
     '__ceil__',
     '__class__',
     '__delattr__',
     '__dir__',
     '__divmod__',
     '__doc__',
     '__eq__',
     '__float__',
     '__floor__',
     '__floordiv__',
     '__format__',
     '__ge__',
     '__getattribute__',
     '__getnewargs__',
     '__gt__',
     '__hash__',
     '__index__',
     '__init__',
     '__init_subclass__',
     '__int__',
     '__invert__',
     '__le__',
     '__lshift__',
     '__lt__',
     '__mod__',
     '__mul__',
     '__ne__',
     '__neg__',
     '__new__',
     '__or__',
     '__pos__',
     '__pow__',
     '__radd__',
     '__rand__',
     '__rdivmod__',
     '__reduce__',
     '__reduce_ex__',
     '__repr__',
     '__rfloordiv__',
     '__rlshift__',
     '__rmod__',
     '__rmul__',
     '__ror__',
     '__round__',
     '__rpow__',
     '__rrshift__',
     '__rshift__',
     '__rsub__',
     '__rtruediv__',
     '__rxor__',
     '__setattr__',
     '__sizeof__',
     '__str__',
     '__sub__',
     '__subclasshook__',
     '__truediv__',
     '__trunc__',
     '__xor__',
     'as_integer_ratio',
     'bit_length',
     'conjugate',
     'denominator',
     'from_bytes',
     'imag',
     'numerator',
     'real',
     'to_bytes']
    -----
    <class 'int'>.
    
    ======================================================================
    ERROR: test_cast_FLOAT_to_BFLOAT16_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
        self.graph_ = self.to_sequence(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 605, in to_sequence
        outputs[obj.name] = _var_as_dict(obj)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 362, in _var_as_dict
        elem_type = _elem_type_as_str(t.elem_type)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 292, in _elem_type_as_str
        raise NotImplementedError(  # pragma: no cover
    NotImplementedError: elem_type '16' is unknown
    fields:
    [...]
    -----
    <class 'int'>.
    
    ======================================================================
    ERROR: test_castlike_BFLOAT16_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
        self.graph_ = self.to_sequence(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
        variables[obj.name] = _var_as_dict(obj)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 362, in _var_as_dict
        elem_type = _elem_type_as_str(t.elem_type)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 292, in _elem_type_as_str
        raise NotImplementedError(  # pragma: no cover
    NotImplementedError: elem_type '16' is unknown
    fields:
    [...]
    -----
    <class 'int'>.
    
    ======================================================================
    ERROR: test_castlike_BFLOAT16_to_FLOAT_expanded_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
        self.graph_ = self.to_sequence(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
        variables[obj.name] = _var_as_dict(obj)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 362, in _var_as_dict
        elem_type = _elem_type_as_str(t.elem_type)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 292, in _elem_type_as_str
        raise NotImplementedError(  # pragma: no cover
    NotImplementedError: elem_type '16' is unknown
    fields:
    [...]
    -----
    <class 'int'>.
    
    ======================================================================
    ERROR: test_castlike_FLOAT_to_BFLOAT16_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
        self.graph_ = self.to_sequence(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
        variables[obj.name] = _var_as_dict(obj)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 362, in _var_as_dict
        elem_type = _elem_type_as_str(t.elem_type)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 292, in _elem_type_as_str
        raise NotImplementedError(  # pragma: no cover
    NotImplementedError: elem_type '16' is unknown
    fields:
    [...]
    -----
    <class 'int'>.
    
    ======================================================================
    ERROR: test_castlike_FLOAT_to_BFLOAT16_expanded_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
        self.graph_ = self.to_sequence(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
        variables[obj.name] = _var_as_dict(obj)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 362, in _var_as_dict
        elem_type = _elem_type_as_str(t.elem_type)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 292, in _elem_type_as_str
        raise NotImplementedError(  # pragma: no cover
    NotImplementedError: elem_type '16' is unknown
    fields:
    [...]
    -----
    <class 'int'>.
    
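The six cast and castlike failures above share one cause in mlprodict itself: `_elem_type_as_str` does not know elem_type 16, which onnx defines as BFLOAT16. A short sketch showing how onnx names that enum value; relying on the protobuf enum would be one way for the helper to avoid a hardcoded mapping.

<<<

# elem_type 16 is TensorProto.BFLOAT16; the protobuf enum translates
# the numeric value into its name directly.
from onnx import TensorProto

print(TensorProto.BFLOAT16)           # 16
print(TensorProto.DataType.Name(16))  # 'BFLOAT16'

>>>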
    ======================================================================
    ERROR: test_constant_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
        outputs = list(prepared_model.run(inputs))
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
        outs = self._session.run(feeds)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
        return self._run(inputs, clean_right_away=False,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1254, in _run_whole_runtime
        res = self._whole.run(inputs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 108, in run
        v = next(iter(inputs.values()))
    StopIteration
    
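test_constant_cpu builds a model whose only node is a Constant, so the runner feeds an empty input dictionary and `next(iter(inputs.values()))` raises StopIteration. A minimal sketch of the failing pattern with a hypothetical guard, using next's default argument.

<<<

# next() on an empty dictionary raises StopIteration; a default value
# avoids it when the model takes no input at all (hypothetical fix).
inputs = {}
first = next(iter(inputs.values()), None)
print(first)  # None instead of StopIteration

>>>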
    ======================================================================
    ERROR: test_div_uint8_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Div(14) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Div(14) node with name '''
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "x"
        input: "y"
        output: "z"
        op_type: "Div"
      }
      name: "test_div_uint8"
      input {
        name: "x"
        type {
          tensor_type {
            elem_type: 2
            shape {
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
      input {
        name: "y"
        type {
          tensor_type {
            ele
    [...]
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
      output {
        name: "z"
        type {
          tensor_type {
            elem_type: 2
            shape {
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 14
    }
    .
    
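Like BitShift above, this is a missing kernel: onnxruntime 1.11 apparently provides no Div(14) implementation for uint8 tensors (elem_type 2). A sketch of the reference result with numpy; integer division is what the ONNX Div operator specifies for integer inputs.

<<<

# Reference result of the failing Div node, computed with numpy.
import numpy as np

x = np.array([7, 8, 9], dtype=np.uint8)
y = np.array([2, 3, 4], dtype=np.uint8)
print(x // y)  # uint8 integer division

>>>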
    ======================================================================
    ERROR: test_gru_batchwise_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.RuntimeException: [ONNXRuntimeError] : 6 : RUNTIME_EXCEPTION : Exception during initialization: /onnxruntime_src/onnxruntime/core/providers/cpu/rnn/deep_cpu_gru.h:54 onnxruntime::DeepCpuGruOp::DeepCpuGruOp(const onnxruntime::OpKernelInfo&) layout_ == 0 was false. Batchwise recurrent operations (layout == 1) are not supported. If you need support create a github issue with justification.
    
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 6 : RUNTIME_EXCEPTION : Exception during initialization: /onnxruntime_src/onnxruntime/core/providers/cpu/rnn/deep_cpu_gru.h:54 onnxruntime::DeepCpuGruOp::DeepCpuGruOp(const onnxruntime::OpKernelInfo&) layout_ == 0 was false. Batchwise recurrent operations (layout == 1) are not supported. If you need support create a github issue with justification.
    '
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "X"
        input: "W"
        input: "R"
        output: "Y"
        output: "Y_h"
        op_type: "GRU"
        attribute {
          name: "hidden_size"
          i: 6
          type: INT
        }
        attribute {
          name: "layout"
          i: 1
          type: INT
        }
      }
      name: "test_gru_batchwise"
      input {
        name: "X"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
              dim {
      
    [...]
            dim {
                dim_value: 1
              }
              dim {
                dim_value: 1
              }
              dim {
                dim_value: 6
              }
            }
          }
        }
      }
      output {
        name: "Y_h"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 1
              }
              dim {
                dim_value: 6
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 14
    }
    .
    
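Here onnxruntime rejects the attribute layout=1 (batch-first tensors); only layout=0 (sequence-first) is implemented, and the same error hits test_lstm_batchwise below. A hedged workaround sketch: swapping the first two axes turns a batch-first input into the sequence-first layout the kernel accepts.

<<<

# layout=1 stores X as [batch_size, seq_length, input_size];
# layout=0 expects [seq_length, batch_size, input_size].
import numpy as np

X_batchwise = np.random.randn(3, 1, 2).astype(np.float32)
X_seqwise = np.transpose(X_batchwise, (1, 0, 2))
print(X_batchwise.shape, '->', X_seqwise.shape)

>>>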
    ======================================================================
    ERROR: test_identity_opt_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
        self.graph_ = self.to_sequence(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
        variables[obj.name] = _var_as_dict(obj)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 419, in _var_as_dict
        dtype['optional'] = _var_as_dict(optional)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
        raise NotImplementedError(  # pragma: no cover
    NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.Optional'> value is 'elem_type {\n  sequence_type {\n    elem_type {\n      tensor_type {\n        elem_type: 1\n        shape {\n          dim {\n            dim_value: 5\n          }\n        }\n      }\n    }\n  }\n}\n'.
    
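This failure also comes from mlprodict's `_var_as_dict`, which does not handle the Optional type introduced with opset 15; the same exception appears below for test_if_opt and test_loop16_seq_none. A minimal sketch, assuming onnx 1.11's type helpers, building the exact type the error message prints: an optional sequence of float tensors of shape [5].

<<<

# The unparsed type: optional(sequence(tensor(float32, [5]))).
from onnx import helper

tensor_type = helper.make_tensor_type_proto(elem_type=1, shape=[5])
opt_type = helper.make_optional_type_proto(
    helper.make_sequence_type_proto(tensor_type))
print(opt_type)

>>>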
    ======================================================================
    ERROR: test_identity_sequence_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 120, in run
        return self.sess._sess.run_with_ort_values(
    RuntimeError: Unable to cast Python instance to C++ type (compile in debug mode for details)
    
    During handling of the above exception, another exception occurred:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
        outputs = list(prepared_model.run(inputs))
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
        outs = self._session.run(feeds)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
        return self._run(inputs, clean_right_away=False,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1254, in _run_whole_runtime
        res = self._whole.run(inputs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 124, in run
        {k: v._get_c_value() for k, v in inputs.items()},
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 124, in <dictcomp>
        {k: v._get_c_value() for k, v in inputs.items()},
    AttributeError: 'list' object has no attribute '_get_c_value'
    
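For test_identity_sequence the session is created but running fails: the input is a sequence of tensors, which reaches the runtime as a plain Python list, and the code path written for OrtValue inputs calls `_get_c_value` on it. A hedged sketch of the mismatch, using onnxruntime's public OrtValue wrapper.

<<<

# OrtValue wraps a single numpy array; a sequence input stays a list.
import numpy as np
from onnxruntime import OrtValue

arr = np.array([1.0, 2.0], dtype=np.float32)
print(type(OrtValue.ortvalue_from_numpy(arr)).__name__)  # OrtValue
seq = [arr, arr]
print(hasattr(seq, '_get_c_value'))  # False, hence the AttributeError

>>>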
    ======================================================================
    ERROR: test_if_opt_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
        self.graph_ = self.to_sequence(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 605, in to_sequence
        outputs[obj.name] = _var_as_dict(obj)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 419, in _var_as_dict
        dtype['optional'] = _var_as_dict(optional)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
        raise NotImplementedError(  # pragma: no cover
    NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.Optional'> value is 'elem_type {\n  sequence_type {\n    elem_type {\n      tensor_type {\n        elem_type: 1\n        shape {\n          dim {\n            dim_value: 5\n          }\n        }\n      }\n    }\n  }\n}\n'.
    
    ======================================================================
    ERROR: test_loop16_seq_none_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
        self.graph_ = self.to_sequence(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
        variables[obj.name] = _var_as_dict(obj)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 419, in _var_as_dict
        dtype['optional'] = _var_as_dict(optional)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
        raise NotImplementedError(  # pragma: no cover
    NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.Optional'> value is 'elem_type {\n  sequence_type {\n    elem_type {\n      tensor_type {\n        elem_type: 1\n        shape {\n        }\n      }\n    }\n  }\n}\n'.
    
    ======================================================================
    ERROR: test_lstm_batchwise_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.RuntimeException: [ONNXRuntimeError] : 6 : RUNTIME_EXCEPTION : Exception during initialization: /onnxruntime_src/onnxruntime/core/providers/cpu/rnn/lstm_base.h:51 onnxruntime::LSTMBase::LSTMBase(const onnxruntime::OpKernelInfo&) layout_ == 0 was false. Batchwise recurrent operations (layout == 1) are not supported. If you need support create a github issue with justification.
    
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 6 : RUNTIME_EXCEPTION : Exception during initialization: /onnxruntime_src/onnxruntime/core/providers/cpu/rnn/lstm_base.h:51 onnxruntime::LSTMBase::LSTMBase(const onnxruntime::OpKernelInfo&) layout_ == 0 was false. Batchwise recurrent operations (layout == 1) are not supported. If you need support create a github issue with justification.
    '
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "X"
        input: "W"
        input: "R"
        output: "Y"
        output: "Y_h"
        op_type: "LSTM"
        attribute {
          name: "hidden_size"
          i: 7
          type: INT
        }
        attribute {
          name: "layout"
          i: 1
          type: INT
        }
      }
      name: "test_lstm_batchwise"
      input {
        name: "X"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
              dim {
    
    [...]
            dim {
                dim_value: 1
              }
              dim {
                dim_value: 1
              }
              dim {
                dim_value: 7
              }
            }
          }
        }
      }
      output {
        name: "Y_h"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 1
              }
              dim {
                dim_value: 7
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 14
    }
    .
    
    ======================================================================
    ERROR: test_max_int16_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Max(12) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Max(12) node with name '''
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "data_0"
        input: "data_1"
        output: "result"
        op_type: "Max"
      }
      name: "test_max_int16"
      input {
        name: "data_0"
        type {
          tensor_type {
            elem_type: 5
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      input {
        name: "data_1"
        type {
          tensor_type {
            elem_type: 5
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "result"
        type {
          tensor_type {
            elem_type: 5
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 12
    }
    .
    
    ======================================================================
    ERROR: test_max_int8_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Max(12) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Max(12) node with name '''
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "data_0"
        input: "data_1"
        output: "result"
        op_type: "Max"
      }
      name: "test_max_int8"
      input {
        name: "data_0"
        type {
          tensor_type {
            elem_type: 3
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      input {
        name: "data_1"
        type {
          tensor_type {
            elem_type: 3
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "result"
        type {
          tensor_type {
            elem_type: 3
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 12
    }
    .
    
    ======================================================================
    ERROR: test_max_uint16_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Max(12) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Max(12) node with name '''
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "data_0"
        input: "data_1"
        output: "result"
        op_type: "Max"
      }
      name: "test_max_uint16"
      input {
        name: "data_0"
        type {
          tensor_type {
            elem_type: 4
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      input {
        name: "data_1"
        type {
          tensor_type {
            elem_type: 4
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "result"
        type {
          tensor_type {
            elem_type: 4
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 12
    }
    .
    
    ======================================================================
    ERROR: test_max_uint8_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Max(12) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Max(12) node with name '''
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "data_0"
        input: "data_1"
        output: "result"
        op_type: "Max"
      }
      name: "test_max_uint8"
      input {
        name: "data_0"
        type {
          tensor_type {
            elem_type: 2
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      input {
        name: "data_1"
        type {
          tensor_type {
            elem_type: 2
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "result"
        type {
          tensor_type {
            elem_type: 2
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 12
    }
    .
    
    ======================================================================
    ERROR: test_min_int16_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Min(12) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Min(12) node with name '''
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "data_0"
        input: "data_1"
        output: "result"
        op_type: "Min"
      }
      name: "test_min_int16"
      input {
        name: "data_0"
        type {
          tensor_type {
            elem_type: 5
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      input {
        name: "data_1"
        type {
          tensor_type {
            elem_type: 5
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "result"
        type {
          tensor_type {
            elem_type: 5
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 12
    }
    .
    
    ======================================================================
    ERROR: test_min_int8_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Min(12) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Min(12) node with name '''
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "data_0"
        input: "data_1"
        output: "result"
        op_type: "Min"
      }
      name: "test_min_int8"
      input {
        name: "data_0"
        type {
          tensor_type {
            elem_type: 3
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      input {
        name: "data_1"
        type {
          tensor_type {
            elem_type: 3
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "result"
        type {
          tensor_type {
            elem_type: 3
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 12
    }
    .
    
    ======================================================================
    ERROR: test_min_uint16_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Min(12) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Min(12) node with name '''
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "data_0"
        input: "data_1"
        output: "result"
        op_type: "Min"
      }
      name: "test_min_uint16"
      input {
        name: "data_0"
        type {
          tensor_type {
            elem_type: 4
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      input {
        name: "data_1"
        type {
          tensor_type {
            elem_type: 4
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "result"
        type {
          tensor_type {
            elem_type: 4
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 12
    }
    .
    
    ======================================================================
    ERROR: test_min_uint8_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Min(12) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Min(12) node with name '''
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "data_0"
        input: "data_1"
        output: "result"
        op_type: "Min"
      }
      name: "test_min_uint8"
      input {
        name: "data_0"
        type {
          tensor_type {
            elem_type: 2
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      input {
        name: "data_1"
        type {
          tensor_type {
            elem_type: 2
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "result"
        type {
          tensor_type {
            elem_type: 2
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 12
    }
    .
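
The ``test_min_*`` failures above mirror the ``Max`` ones: no int8, int16,
uint8 or uint16 kernel for ``Min`` in this onnxruntime build. A small probe,
illustrative only, shows which element types the runtime actually accepts
for ``Min`` at opset 12::

    from onnx import TensorProto, helper
    from onnxruntime import InferenceSession

    def supports_min(elem_type):
        # Build a minimal two-input Min model and check whether a
        # session can be created for it.
        graph = helper.make_graph(
            [helper.make_node('Min', ['a', 'b'], ['c'])], 'probe',
            [helper.make_tensor_value_info('a', elem_type, [3]),
             helper.make_tensor_value_info('b', elem_type, [3])],
            [helper.make_tensor_value_info('c', elem_type, [3])])
        model = helper.make_model(
            graph, opset_imports=[helper.make_opsetid('', 12)])
        try:
            InferenceSession(model.SerializeToString(),
                             providers=['CPUExecutionProvider'])
            return True
        except Exception:
            return False

    for name in ['FLOAT', 'INT32', 'INT8', 'INT16', 'UINT8', 'UINT16']:
        print(name, supports_min(getattr(TensorProto, name)))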
    
    ======================================================================
    ERROR: test_momentum_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 372, in _create_inference_session
        sess = C.InferenceSession(session_options, self._model_bytes, False, self._read_config_from_model)
    onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Fatal error: Momentum is not a registered function/op
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 1 : FAIL : Fatal error: Momentum is not a registered function/op'
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "R"
        input: "T"
        input: "X"
        input: "G"
        input: "V"
        output: "X_new"
        output: "V_new"
        op_type: "Momentum"
        attribute {
          name: "alpha"
          f: 0.949999988079071
          type: FLOAT
        }
        attribute {
          name: "beta"
          f: 0.10000000149011612
          type: FLOAT
        }
        attribute {
          name: "mode"
          s: "standard"
          type: STRING
        }
        attribute {
          name: "norm_coefficient
    [...]
        dim {
                dim_value: 2
              }
            }
          }
        }
      }
      output {
        name: "X_new"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
      output {
        name: "V_new"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
    }
    opset_import {
      domain: "ai.onnx.preview.training"
      version: 1
    }
    .
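
``Momentum`` is defined in the ``ai.onnx.preview.training`` domain, and a
standard (inference-only) onnxruntime build does not register it; only
training-enabled builds do. A small helper, illustrative only, can flag such
models before a session is attempted::

    import onnx

    def needs_training_ops(model):
        # True when the model imports the preview training domain,
        # which this onnxruntime build cannot load.
        return any(op.domain == 'ai.onnx.preview.training'
                   for op in model.opset_import)

The two following ``Momentum`` tests, ``test_momentum_multiple_cpu`` and
``test_nesterov_momentum_cpu``, fail the same way.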
    
    ======================================================================
    ERROR: test_momentum_multiple_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 372, in _create_inference_session
        sess = C.InferenceSession(session_options, self._model_bytes, False, self._read_config_from_model)
    onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Fatal error: Momentum is not a registered function/op
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 1 : FAIL : Fatal error: Momentum is not a registered function/op'
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "R"
        input: "T"
        input: "X1"
        input: "X2"
        input: "G1"
        input: "G2"
        input: "H1"
        input: "H2"
        output: "X1_new"
        output: "X2_new"
        output: "V1_new"
        output: "V2_new"
        op_type: "Momentum"
        attribute {
          name: "alpha"
          f: 0.949999988079071
          type: FLOAT
        }
        attribute {
          name: "beta"
          f: 0.8500000238418579
          type: FLOAT
        }
        attribute {
          name: "mo
    [...]
      dim {
                dim_value: 2
              }
            }
          }
        }
      }
      output {
        name: "V1_new"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 1
              }
            }
          }
        }
      }
      output {
        name: "V2_new"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
    }
    opset_import {
      domain: "ai.onnx.preview.training"
      version: 1
    }
    .
    
    ======================================================================
    ERROR: test_mul_uint8_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Mul(14) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Mul(14) node with name '''
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "x"
        input: "y"
        output: "z"
        op_type: "Mul"
      }
      name: "test_mul_uint8"
      input {
        name: "x"
        type {
          tensor_type {
            elem_type: 2
            shape {
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
      input {
        name: "y"
        type {
          tensor_type {
            ele
    [...]
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
      output {
        name: "z"
        type {
          tensor_type {
            elem_type: 2
            shape {
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 14
    }
    .
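
``test_mul_uint8_cpu`` is the same gap again: ``Mul`` at opset 14 is defined
for uint8 but the CPU provider implements no such kernel, and the cast
pattern sketched above for ``Max`` applies unchanged.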
    
    ======================================================================
    ERROR: test_nesterov_momentum_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 372, in _create_inference_session
        sess = C.InferenceSession(session_options, self._model_bytes, False, self._read_config_from_model)
    onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Fatal error: Momentum is not a registered function/op
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 1 : FAIL : Fatal error: Momentum is not a registered function/op'
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "R"
        input: "T"
        input: "X"
        input: "G"
        input: "V"
        output: "X_new"
        output: "V_new"
        op_type: "Momentum"
        attribute {
          name: "alpha"
          f: 0.949999988079071
          type: FLOAT
        }
        attribute {
          name: "beta"
          f: 1.0
          type: FLOAT
        }
        attribute {
          name: "mode"
          s: "nesterov"
          type: STRING
        }
        attribute {
          name: "norm_coefficient"
          f: 0.009
    [...]
        dim {
                dim_value: 2
              }
            }
          }
        }
      }
      output {
        name: "X_new"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
      output {
        name: "V_new"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
    }
    opset_import {
      domain: "ai.onnx.preview.training"
      version: 1
    }
    .
    
    ======================================================================
    ERROR: test_optional_get_element_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
        self.graph_ = self.to_sequence(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
        variables[obj.name] = _var_as_dict(obj)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 419, in _var_as_dict
        dtype['optional'] = _var_as_dict(optional)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
        raise NotImplementedError(  # pragma: no cover
    NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.Optional'> value is 'elem_type {\n  tensor_type {\n    elem_type: 1\n    shape {\n      dim {\n        dim_value: 4\n      }\n    }\n  }\n}\n'.
    
    ======================================================================
    ERROR: test_optional_get_element_sequence_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
        self.graph_ = self.to_sequence(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
        variables[obj.name] = _var_as_dict(obj)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 419, in _var_as_dict
        dtype['optional'] = _var_as_dict(optional)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
        raise NotImplementedError(  # pragma: no cover
    NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.Optional'> value is 'elem_type {\n  sequence_type {\n    elem_type {\n      tensor_type {\n        elem_type: 6\n        shape {\n          dim {\n            dim_value: 4\n          }\n        }\n      }\n    }\n  }\n}\n'.
    
    ======================================================================
    ERROR: test_optional_has_element_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
        self.graph_ = self.to_sequence(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
        variables[obj.name] = _var_as_dict(obj)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 419, in _var_as_dict
        dtype['optional'] = _var_as_dict(optional)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
        raise NotImplementedError(  # pragma: no cover
    NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.Optional'> value is 'elem_type {\n  tensor_type {\n    elem_type: 1\n    shape {\n      dim {\n        dim_value: 4\n      }\n    }\n  }\n}\n'.
    
    ======================================================================
    ERROR: test_optional_has_element_empty_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
        self.graph_ = self.to_sequence(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
        variables[obj.name] = _var_as_dict(obj)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 419, in _var_as_dict
        dtype['optional'] = _var_as_dict(optional)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
        raise NotImplementedError(  # pragma: no cover
    NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.Optional'> value is 'elem_type {\n  tensor_type {\n    elem_type: 6\n    shape {\n    }\n  }\n}\n'.
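
The four ``test_optional_*`` failures are raised by mlprodict itself, not by
onnxruntime: ``_var_as_dict`` in ``onnx2py_helper.py`` has no branch for the
``Optional`` type introduced with opset 15. A sketch of what that branch
would have to inspect, using only public onnx helpers::

    from onnx import TensorProto, helper

    # An optional tensor input, built the way the backend tests do it.
    inner = helper.make_tensor_type_proto(TensorProto.FLOAT, [4])
    opt = helper.make_optional_type_proto(inner)
    value_info = helper.make_value_info('opt_in', opt)

    # The inner type sits under type.optional_type.elem_type, which is
    # the field _var_as_dict would need to recurse into.
    t = value_info.type
    if t.HasField('optional_type'):
        print(t.optional_type.elem_type)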
    
    ======================================================================
    ERROR: test_pow_types_float32_uint32_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Pow(12) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Pow(12) node with name '''
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "x"
        input: "y"
        output: "z"
        op_type: "Pow"
      }
      name: "test_pow_types_float32_uint32"
      input {
        name: "x"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      input {
        name: "y"
        type {
          tensor_type {
            elem_type: 12
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "z"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 12
    }
    .
    
    ======================================================================
    ERROR: test_pow_types_float32_uint64_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Pow(12) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Pow(12) node with name '''
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "x"
        input: "y"
        output: "z"
        op_type: "Pow"
      }
      name: "test_pow_types_float32_uint64"
      input {
        name: "x"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      input {
        name: "y"
        type {
          tensor_type {
            elem_type: 13
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "z"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 12
    }
    .
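
The two test_pow_types_float32_uint* failures are kernel-coverage gaps rather than backend bugs: opset 12 extended Pow to integer exponents, but onnxruntime 1.11 registers no CPU kernel for a float32 base combined with a uint32 or uint64 exponent. A minimal workaround sketch, assuming the exponent fits in float32 without precision loss, casts it before Pow:

<<<

import numpy
from onnx import helper, TensorProto
from onnxruntime import InferenceSession

# Same graph as test_pow_types_float32_uint64, except the exponent is cast
# to float32 so onnxruntime can select its float32/float32 Pow kernel.
# Caveat: very large uint64 exponents lose precision in the cast.
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [3])
y = helper.make_tensor_value_info('y', TensorProto.UINT64, [3])
z = helper.make_tensor_value_info('z', TensorProto.FLOAT, [3])
nodes = [
    helper.make_node('Cast', ['y'], ['yf'], to=TensorProto.FLOAT),
    helper.make_node('Pow', ['x', 'yf'], ['z']),
]
graph = helper.make_graph(nodes, 'pow_cast', [x, y], [z])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 12)])

sess = InferenceSession(model.SerializeToString())
print(sess.run(None, {'x': numpy.array([1., 2., 3.], dtype=numpy.float32),
                      'y': numpy.array([4, 5, 6], dtype=numpy.uint64)}))

>>>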
    
    ======================================================================
    ERROR: test_reshape_allowzero_reordered_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 213, in _init
        raise RuntimeError(  # pragma: no cover
    RuntimeError: Wrong ONNX file, one input or output has an empty shape: name: "data"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 0
          }
          dim {
            dim_value: 3
          }
          dim {
            dim_value: 4
          }
        }
      }
    }
    .
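
This failure, like test_slice_start_out_of_bounds_cpu, test_tril_zero_cpu and test_triu_zero_cpu below, never reaches onnxruntime: mlprodict's OnnxInference rejects any model whose input or output declares a literal zero dimension, while these tests build empty tensors on purpose (Reshape with allowzero=1 is allowed to keep a 0-sized axis). A minimal reproduction, assuming mlprodict is installed:

<<<

from onnx import helper, TensorProto
from mlprodict.onnxrt import OnnxInference

# An input with a literal zero dimension, as in test_reshape_allowzero_reordered.
data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [0, 3, 4])
shape = helper.make_tensor_value_info('shape', TensorProto.INT64, [3])
out = helper.make_tensor_value_info('reshaped', TensorProto.FLOAT, [3, 4, 0])
node = helper.make_node('Reshape', ['data', 'shape'], ['reshaped'], allowzero=1)
graph = helper.make_graph([node], 'allowzero', [data, shape], [out])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 14)])

try:
    OnnxInference(model, runtime='onnxruntime1')
except RuntimeError as e:
    print(e)  # Wrong ONNX file, one input or output has an empty shape: ...

>>>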
    
    ======================================================================
    ERROR: test_sequence_insert_at_back_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 120, in run
        return self.sess._sess.run_with_ort_values(
    RuntimeError: Unable to cast Python instance to C++ type (compile in debug mode for details)
    
    During handling of the above exception, another exception occurred:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
        outputs = list(prepared_model.run(inputs))
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
        outs = self._session.run(feeds)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
        return self._run(inputs, clean_right_away=False,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1254, in _run_whole_runtime
        res = self._whole.run(inputs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 124, in run
        {k: v._get_c_value() for k, v in inputs.items()},
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 124, in <dictcomp>
        {k: v._get_c_value() for k, v in inputs.items()},
    AttributeError: 'list' object has no attribute '_get_c_value'
    
    ======================================================================
    ERROR: test_sequence_insert_at_front_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 120, in run
        return self.sess._sess.run_with_ort_values(
    RuntimeError: Unable to cast Python instance to C++ type (compile in debug mode for details)
    
    During handling of the above exception, another exception occurred:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
        outputs = list(prepared_model.run(inputs))
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
        outs = self._session.run(feeds)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
        return self._run(inputs, clean_right_away=False,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1254, in _run_whole_runtime
        res = self._whole.run(inputs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 124, in run
        {k: v._get_c_value() for k, v in inputs.items()},
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 124, in <dictcomp>
        {k: v._get_c_value() for k, v in inputs.items()},
    AttributeError: 'list' object has no attribute '_get_c_value'
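
Both test_sequence_insert_* errors come from mlprodict's wrapper, not from onnxruntime: OnnxWholeSession.run goes through run_with_ort_values and therefore calls _get_c_value() on every input, which works for OrtValue wrappers but not for the plain Python list of arrays a sequence test feeds. onnxruntime itself handles sequence operators, as this sketch shows (with the sequence built in-graph so every model input stays a tensor):

<<<

import numpy
from onnx import helper, TensorProto
from onnxruntime import InferenceSession

# SequenceConstruct builds a sequence from 'x', SequenceInsert appends 'y'
# at the back (no position given), SequenceAt extracts one element so the
# graph inputs and outputs are all plain tensors.
x = helper.make_tensor_value_info('x', TensorProto.FLOAT, [2])
y = helper.make_tensor_value_info('y', TensorProto.FLOAT, [2])
pos = helper.make_tensor_value_info('pos', TensorProto.INT64, [])
out = helper.make_tensor_value_info('out', TensorProto.FLOAT, [2])
nodes = [
    helper.make_node('SequenceConstruct', ['x'], ['seq']),
    helper.make_node('SequenceInsert', ['seq', 'y'], ['seq2']),
    helper.make_node('SequenceAt', ['seq2', 'pos'], ['out']),
]
graph = helper.make_graph(nodes, 'seq_insert', [x, y, pos], [out])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 12)])

sess = InferenceSession(model.SerializeToString())
print(sess.run(None, {'x': numpy.array([1., 2.], dtype=numpy.float32),
                      'y': numpy.array([3., 4.], dtype=numpy.float32),
                      'pos': numpy.array(1, dtype=numpy.int64)}))

>>>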
    
    ======================================================================
    ERROR: test_simple_rnn_batchwise_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.RuntimeException: [ONNXRuntimeError] : 6 : RUNTIME_EXCEPTION : Exception during initialization: /onnxruntime_src/onnxruntime/core/providers/cpu/rnn/rnn.h:44 onnxruntime::RNN<T>::RNN(const onnxruntime::OpKernelInfo&) [with T = float] layout_ == 0 was false. Batchwise recurrent operations (layout == 1) are not supported. If you need support create a github issue with justification.
    
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 6 : RUNTIME_EXCEPTION : Exception during initialization: /onnxruntime_src/onnxruntime/core/providers/cpu/rnn/rnn.h:44 onnxruntime::RNN<T>::RNN(const onnxruntime::OpKernelInfo&) [with T = float] layout_ == 0 was false. Batchwise recurrent operations (layout == 1) are not supported. If you need support create a github issue with justification.
    '
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "X"
        input: "W"
        input: "R"
        output: "Y"
        output: "Y_h"
        op_type: "RNN"
        attribute {
          name: "hidden_size"
          i: 4
          type: INT
        }
        attribute {
          name: "layout"
          i: 1
          type: INT
        }
      }
      name: "test_simple_rnn_batchwise"
      input {
        name: "X"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
              d
    [...]
            dim {
                dim_value: 1
              }
              dim {
                dim_value: 1
              }
              dim {
                dim_value: 4
              }
            }
          }
        }
      }
      output {
        name: "Y_h"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 1
              }
              dim {
                dim_value: 4
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 14
    }
    .
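
onnxruntime rejects layout=1 (batchwise) recurrent operators by design, as the initialization message states. A hedged workaround sketch, not the backend's code: run the RNN in the default time-major layout and transpose the input and output to and from the batchwise convention:

<<<

import numpy
from onnx import helper, numpy_helper, TensorProto
from onnxruntime import InferenceSession

batch, seq, inp, hid = 3, 1, 2, 4
X = helper.make_tensor_value_info('X', TensorProto.FLOAT, [batch, seq, inp])
Y = helper.make_tensor_value_info('Y', TensorProto.FLOAT, [batch, seq, 1, hid])
W = numpy_helper.from_array(
    numpy.random.randn(1, hid, inp).astype(numpy.float32), 'W')
R = numpy_helper.from_array(
    numpy.random.randn(1, hid, hid).astype(numpy.float32), 'R')
nodes = [
    # batchwise [batch, seq, inp] -> time-major [seq, batch, inp]
    helper.make_node('Transpose', ['X'], ['Xt'], perm=[1, 0, 2]),
    helper.make_node('RNN', ['Xt', 'W', 'R'], ['Yt'], hidden_size=hid),
    # Yt [seq, num_directions, batch, hid] -> [batch, seq, num_directions, hid]
    helper.make_node('Transpose', ['Yt'], ['Y'], perm=[2, 0, 1, 3]),
]
graph = helper.make_graph(nodes, 'rnn_batchwise', [X], [Y],
                          initializer=[W, R])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 14)])

sess = InferenceSession(model.SerializeToString())
feed = {'X': numpy.random.randn(batch, seq, inp).astype(numpy.float32)}
print(sess.run(None, feed)[0].shape)  # (3, 1, 1, 4), the batchwise layout

>>>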
    
    ======================================================================
    ERROR: test_slice_start_out_of_bounds_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 213, in _init
        raise RuntimeError(  # pragma: no cover
    RuntimeError: Wrong ONNX file, one input or output has an empty shape: name: "y"
    type {
      tensor_type {
        elem_type: 1
        shape {
          dim {
            dim_value: 20
          }
          dim {
            dim_value: 0
          }
          dim {
            dim_value: 5
          }
        }
      }
    }
    .
    
    ======================================================================
    ERROR: test_sub_uint8_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Sub(14) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Sub(14) node with name '''
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "x"
        input: "y"
        output: "z"
        op_type: "Sub"
      }
      name: "test_sub_uint8"
      input {
        name: "x"
        type {
          tensor_type {
            elem_type: 2
            shape {
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
      input {
        name: "y"
        type {
          tensor_type {
            ele
    [...]
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
      output {
        name: "z"
        type {
          tensor_type {
            elem_type: 2
            shape {
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 14
    }
    .
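
onnxruntime 1.11 ships no uint8 kernel for Sub(14). A hedged workaround sketch: cast both operands to a supported integer type, subtract, and cast back (onnxruntime's C-style cast wraps negative differences modulo 256, matching uint8 arithmetic, though ONNX leaves out-of-range casts unspecified):

<<<

import numpy
from onnx import helper, TensorProto
from onnxruntime import InferenceSession

x = helper.make_tensor_value_info('x', TensorProto.UINT8, [3])
y = helper.make_tensor_value_info('y', TensorProto.UINT8, [3])
z = helper.make_tensor_value_info('z', TensorProto.UINT8, [3])
nodes = [
    helper.make_node('Cast', ['x'], ['x32'], to=TensorProto.INT32),
    helper.make_node('Cast', ['y'], ['y32'], to=TensorProto.INT32),
    helper.make_node('Sub', ['x32', 'y32'], ['z32']),
    helper.make_node('Cast', ['z32'], ['z'], to=TensorProto.UINT8),
]
graph = helper.make_graph(nodes, 'sub_uint8', [x, y], [z])
model = helper.make_model(graph, opset_imports=[helper.make_opsetid('', 14)])

sess = InferenceSession(model.SerializeToString())
print(sess.run(None, {'x': numpy.array([9, 9, 9], dtype=numpy.uint8),
                      'y': numpy.array([1, 2, 3], dtype=numpy.uint8)}))

>>>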
    
    ======================================================================
    ERROR: test_tril_zero_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 213, in _init
        raise RuntimeError(  # pragma: no cover
    RuntimeError: Wrong ONNX file, one input or output has an empty shape: name: "x"
    type {
      tensor_type {
        elem_type: 7
        shape {
          dim {
            dim_value: 3
          }
          dim {
            dim_value: 0
          }
          dim {
            dim_value: 5
          }
        }
      }
    }
    .
    
    ======================================================================
    ERROR: test_triu_zero_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 213, in _init
        raise RuntimeError(  # pragma: no cover
    RuntimeError: Wrong ONNX file, one input or output has an empty shape: name: "x"
    type {
      tensor_type {
        elem_type: 7
        shape {
          dim {
            dim_value: 0
          }
          dim {
            dim_value: 5
          }
        }
      }
    }
    .
    
    ======================================================================
    ERROR: test_AvgPool1d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for AveragePool(1) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for AveragePool(1) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        output: "1"
        op_type: "Unsqueeze"
        attribute {
          name: "axes"
          ints: 3
          type: INTS
        }
      }
      node {
        input: "1"
        output: "2"
        op_type: "AveragePool"
        attribute {
          name: "kernel_shape"
          ints: 2
          ints: 1
          type: INTS
        }
        attribute {
          name: "pads"
          ints: 0
          ints: 0
          ints: 0
          ints: 0
          type: INTS
        }
        attribute
    [...]
    
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 6
              }
            }
          }
        }
      }
      output {
        name: "3"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
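
All the test_AvgPool* failures below share one cause: these PyTorch-0.3-era models declare opset 6, for which onnxruntime 1.11 no longer registers an AveragePool kernel. A hedged mitigation sketch ('AvgPool1d.onnx' is a placeholder path): upgrade the model with onnx's version converter before creating the session; the converter can still fail on models that rely on removed behaviours.

<<<

import onnx
from onnx import version_converter
from onnxruntime import InferenceSession

# Load one of the failing opset-6 models (placeholder path) and bump its
# opset so onnxruntime can match a modern AveragePool kernel.
model = onnx.load('AvgPool1d.onnx')
upgraded = version_converter.convert_version(model, 11)

sess = InferenceSession(upgraded.SerializeToString())
print([o.name for o in sess.get_outputs()])

>>>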
    
    ======================================================================
    ERROR: test_AvgPool1d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for AveragePool(1) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for AveragePool(1) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        output: "1"
        op_type: "Unsqueeze"
        attribute {
          name: "axes"
          ints: 3
          type: INTS
        }
      }
      node {
        input: "1"
        output: "2"
        op_type: "AveragePool"
        attribute {
          name: "kernel_shape"
          ints: 2
          ints: 1
          type: INTS
        }
        attribute {
          name: "pads"
          ints: 0
          ints: 0
          ints: 0
          ints: 0
          type: INTS
        }
        attribute
    [...]
    
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 6
              }
            }
          }
        }
      }
      output {
        name: "3"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_AvgPool2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for AveragePool(1) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for AveragePool(1) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        output: "1"
        op_type: "AveragePool"
        attribute {
          name: "kernel_shape"
          ints: 2
          ints: 2
          type: INTS
        }
        attribute {
          name: "pads"
          ints: 0
          ints: 0
          ints: 0
          ints: 0
          type: INTS
        }
        attribute {
          name: "strides"
          ints: 2
          ints: 2
          type: INTS
        }
      }
      name: "torch-jit-export"
      input {
        name: "0"
        type {
    [...]
    
              dim {
                dim_value: 6
              }
              dim {
                dim_value: 6
              }
            }
          }
        }
      }
      output {
        name: "1"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_AvgPool2d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for AveragePool(1) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for AveragePool(1) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        output: "1"
        op_type: "AveragePool"
        attribute {
          name: "kernel_shape"
          ints: 2
          ints: 2
          type: INTS
        }
        attribute {
          name: "pads"
          ints: 0
          ints: 0
          ints: 0
          ints: 0
          type: INTS
        }
        attribute {
          name: "strides"
          ints: 2
          ints: 2
          type: INTS
        }
      }
      name: "torch-jit-export"
      input {
        name: "0"
        type {
    [...]
    
              dim {
                dim_value: 6
              }
              dim {
                dim_value: 6
              }
            }
          }
        }
      }
      output {
        name: "1"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_AvgPool3d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for AveragePool(1) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for AveragePool(1) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        output: "1"
        op_type: "AveragePool"
        attribute {
          name: "kernel_shape"
          ints: 2
          ints: 2
          ints: 2
          type: INTS
        }
        attribute {
          name: "pads"
          ints: 0
          ints: 0
          ints: 0
          ints: 0
          ints: 0
          ints: 0
          type: INTS
        }
        attribute {
          name: "strides"
          ints: 2
          ints: 2
          ints: 2
          type: INTS
        }
      }
      nam
    [...]
    
              dim {
                dim_value: 4
              }
            }
          }
        }
      }
      output {
        name: "1"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_AvgPool3d_stride1_pad0_gpu_input_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for AveragePool(1) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for AveragePool(1) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        output: "1"
        op_type: "AveragePool"
        attribute {
          name: "kernel_shape"
          ints: 3
          ints: 3
          ints: 3
          type: INTS
        }
        attribute {
          name: "pads"
          ints: 0
          ints: 0
          ints: 0
          ints: 0
          ints: 0
          ints: 0
          type: INTS
        }
        attribute {
          name: "strides"
          ints: 1
          ints: 1
          ints: 1
          type: INTS
        }
      }
      nam
    [...]
    
              dim {
                dim_value: 4
              }
            }
          }
        }
      }
      output {
        name: "1"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_AvgPool3d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for AveragePool(1) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for AveragePool(1) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        output: "1"
        op_type: "AveragePool"
        attribute {
          name: "kernel_shape"
          ints: 2
          ints: 2
          ints: 2
          type: INTS
        }
        attribute {
          name: "pads"
          ints: 0
          ints: 0
          ints: 0
          ints: 0
          ints: 0
          ints: 0
          type: INTS
        }
        attribute {
          name: "strides"
          ints: 2
          ints: 2
          ints: 2
          type: INTS
        }
      }
      nam
    [...]
    
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
      output {
        name: "1"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_BatchNorm1d_3d_input_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for BatchNormalization(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for BatchNormalization(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        input: "2"
        input: "3"
        input: "4"
        output: "5"
        op_type: "BatchNormalization"
        attribute {
          name: "epsilon"
          f: 9.999999747378752e-06
          type: FLOAT
        }
        attribute {
          name: "is_test"
          i: 1
          type: INT
        }
        attribute {
          name: "momentum"
          f: 0.8999999761581421
          type: FLOAT
        }
      }
      name: "torch-jit-export"
      initial
    [...]
       }
        }
      }
      input {
        name: "4"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
      output {
        name: "5"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
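
The test_BatchNorm* failures are the same opset problem in another operator: the models declare opset 6 and still carry the is_test attribute that opset 7 removed, so onnxruntime finds no BatchNormalization(6) kernel. The version-converter sketch shown after test_AvgPool1d_cpu should apply here as well, provided the converter succeeds in dropping the removed is_test attribute during the upgrade.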
    
    ======================================================================
    ERROR: test_BatchNorm2d_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for BatchNormalization(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for BatchNormalization(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        input: "2"
        input: "3"
        input: "4"
        output: "5"
        op_type: "BatchNormalization"
        attribute {
          name: "epsilon"
          f: 9.999999747378752e-06
          type: FLOAT
        }
        attribute {
          name: "is_test"
          i: 1
          type: INT
        }
        attribute {
          name: "momentum"
          f: 0.8999999761581421
          type: FLOAT
        }
      }
      name: "torch-jit-export"
      initial
    [...]
       tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "5"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 6
              }
              dim {
                dim_value: 6
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_BatchNorm2d_momentum_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for BatchNormalization(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for BatchNormalization(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        input: "2"
        input: "3"
        input: "4"
        output: "5"
        op_type: "BatchNormalization"
        attribute {
          name: "epsilon"
          f: 0.0010000000474974513
          type: FLOAT
        }
        attribute {
          name: "is_test"
          i: 1
          type: INT
        }
        attribute {
          name: "momentum"
          f: 0.20000000298023224
          type: FLOAT
        }
      }
      name: "torch-jit-export"
      initia
    [...]
       tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "5"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 6
              }
              dim {
                dim_value: 6
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_BatchNorm3d_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for BatchNormalization(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for BatchNormalization(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        input: "2"
        input: "3"
        input: "4"
        output: "5"
        op_type: "BatchNormalization"
        attribute {
          name: "epsilon"
          f: 9.999999747378752e-06
          type: FLOAT
        }
        attribute {
          name: "is_test"
          i: 1
          type: INT
        }
        attribute {
          name: "momentum"
          f: 0.8999999761581421
          type: FLOAT
        }
      }
      name: "torch-jit-export"
      initial
    [...]
    
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "5"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 4
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_BatchNorm3d_momentum_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for BatchNormalization(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for BatchNormalization(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        input: "2"
        input: "3"
        input: "4"
        output: "5"
        op_type: "BatchNormalization"
        attribute {
          name: "epsilon"
          f: 0.0010000000474974513
          type: FLOAT
        }
        attribute {
          name: "is_test"
          i: 1
          type: INT
        }
        attribute {
          name: "momentum"
          f: 0.30000001192092896
          type: FLOAT
        }
      }
      name: "torch-jit-export"
      initia
    [...]
    
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "5"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 4
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
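
Note that these dumps still carry an ``is_test`` attribute, which was dropped from ``BatchNormalization`` in opset 7; that is consistent with the graphs predating the opsets this runtime still implements. As a reference for what the node computes in inference mode, a hedged numpy sketch of y = scale * (x - mean) / sqrt(var + epsilon) + bias, broadcast over the channel axis, with shapes taken from the BatchNorm3d dump above and random placeholder tensors:

<<<

# Hedged numpy reference of inference-mode BatchNormalization; all tensor
# contents are random placeholders, only the shapes follow the dump above.
import numpy as np

eps = 0.0010000000474974513
x = np.random.randn(2, 3, 4, 4, 4).astype(np.float32)      # input "0"
scale, bias, mean = (np.random.randn(3).astype(np.float32)
                     for _ in range(3))                    # inputs "1".."3"
var = np.random.rand(3).astype(np.float32) + 0.1           # input "4", > 0
c = (1, 3, 1, 1, 1)                                        # channel broadcast
y = (scale.reshape(c) * (x - mean.reshape(c))
     / np.sqrt(var.reshape(c) + eps) + bias.reshape(c))    # output "5"

>>>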
    
    ======================================================================
    ERROR: test_GLU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Mul(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Mul(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        output: "1"
        output: "2"
        op_type: "Split"
        attribute {
          name: "axis"
          i: -1
          type: INT
        }
      }
      node {
        input: "2"
        output: "3"
        op_type: "Sigmoid"
      }
      node {
        input: "1"
        input: "3"
        output: "4"
        op_type: "Mul"
      }
      name: "torch-jit-export"
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 5
              }
              dim {
                dim_value: 6
              }
            }
          }
        }
      }
      output {
        name: "4"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 5
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
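
The GLU graph above is simply Split -> Sigmoid -> Mul: the input is split in two along the given axis and the first half is multiplied by the sigmoid of the second half, which is why the (5, 6) input yields a (5, 3) output. A hedged numpy reference of the same computation, independent of the failing runtime:

<<<

# Hedged numpy reference of the Split -> Sigmoid -> Mul graph above.
import numpy as np

x = np.random.randn(5, 6).astype(np.float32)   # input "0"
a, b = np.split(x, 2, axis=-1)                 # Split -> outputs "1" and "2"
y = a * (1.0 / (1.0 + np.exp(-b)))             # Sigmoid then Mul -> output "4"

>>>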
    
    ======================================================================
    ERROR: test_GLU_dim_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Mul(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Mul(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        output: "1"
        output: "2"
        op_type: "Split"
        attribute {
          name: "axis"
          i: 1
          type: INT
        }
      }
      node {
        input: "2"
        output: "3"
        op_type: "Sigmoid"
      }
      node {
        input: "1"
        input: "3"
        output: "4"
        op_type: "Mul"
      }
      name: "torch-jit-export"
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 5
              }
              dim {
                dim_value: 6
              }
              dim {
                dim_value: 7
              }
            }
          }
        }
      }
      output {
        name: "4"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 5
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 7
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_Linear_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Gemm(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Gemm(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        input: "2"
        output: "3"
        op_type: "Gemm"
        attribute {
          name: "alpha"
          f: 1.0
          type: FLOAT
        }
        attribute {
          name: "beta"
          f: 1.0
          type: FLOAT
        }
        attribute {
          name: "broadcast"
          i: 1
          type: INT
        }
        attribute {
          name: "transB"
          i: 1
          type: INT
        }
      }
      name: "torch-jit-export"
      initializer {
        
    [...]
    {
                dim_value: 10
              }
            }
          }
        }
      }
      input {
        name: "2"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 8
              }
            }
          }
        }
      }
      output {
        name: "3"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 8
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
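
For reference, the opset-6 ``Gemm`` above uses alpha = beta = 1 with transB = 1, i.e. y = x @ w.T + b with the bias broadcast over rows. A hedged numpy sketch; the (4, 10) and (8, 10) shapes are inferred from the truncated dump and are assumptions:

<<<

# Hedged numpy reference of the Gemm node above (alpha = beta = 1, transB = 1).
import numpy as np

x = np.random.randn(4, 10).astype(np.float32)   # input "0" (assumed shape)
w = np.random.randn(8, 10).astype(np.float32)   # initializer "1" (assumed)
b = np.random.randn(8).astype(np.float32)       # input "2"
y = x @ w.T + b                                 # output "3", shape (4, 8)

>>>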
    
    ======================================================================
    ERROR: test_PReLU_1d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for PRelu(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for PRelu(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "PRelu"
      }
      name: "torch-jit-export"
      initializer {
        dims: 1
        data_type: 1
        name: "1"
        raw_data: "\000\000\200>"
      }
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim
    [...]
       }
        }
      }
      input {
        name: "1"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 1
              }
            }
          }
        }
      }
      output {
        name: "2"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
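
The initializer bytes ``"\000\000\200>"`` in the PRelu dumps decode to a single little-endian float32, a slope of 0.25. A hedged numpy reference of the node, with a random placeholder input:

<<<

# Hedged numpy reference of the PRelu node above; the slope is decoded from
# the initializer's raw_data, the input tensor is a random placeholder.
import numpy as np

slope = np.frombuffer(b"\x00\x00\x80>", dtype=np.float32)  # -> [0.25]
x = np.random.randn(2, 3, 4).astype(np.float32)            # input "0"
y = np.where(x >= 0, x, slope * x)                         # output "2"

>>>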
    
    ======================================================================
    ERROR: test_PReLU_1d_multiparam_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for PRelu(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for PRelu(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "PRelu"
      }
      name: "torch-jit-export"
      initializer {
        dims: 3
        data_type: 1
        name: "1"
        raw_data: "\000\000\200>\000\000\200>\000\000\200>"
      }
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
         
    [...]
       }
        }
      }
      input {
        name: "1"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "2"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_PReLU_2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for PRelu(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for PRelu(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "PRelu"
      }
      name: "torch-jit-export"
      initializer {
        dims: 1
        data_type: 1
        name: "1"
        raw_data: "\000\000\200>"
      }
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim
    [...]
       tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 1
              }
            }
          }
        }
      }
      output {
        name: "2"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_PReLU_2d_multiparam_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for PRelu(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for PRelu(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "PRelu"
      }
      name: "torch-jit-export"
      initializer {
        dims: 3
        data_type: 1
        name: "1"
        raw_data: "\000\000\200>\000\000\200>\000\000\200>"
      }
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
         
    [...]
       tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "2"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_PReLU_3d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for PRelu(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for PRelu(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "PRelu"
      }
      name: "torch-jit-export"
      initializer {
        dims: 1
        data_type: 1
        name: "1"
        raw_data: "\000\000\200>"
      }
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim
    [...]
    
              dim {
                dim_value: 1
              }
            }
          }
        }
      }
      output {
        name: "2"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
              dim {
                dim_value: 6
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_PReLU_3d_multiparam_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for PRelu(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for PRelu(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "PRelu"
      }
      name: "torch-jit-export"
      initializer {
        dims: 3
        data_type: 1
        name: "1"
        raw_data: "\000\000\200>\000\000\200>\000\000\200>"
      }
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
         
    [...]
    
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "2"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
              dim {
                dim_value: 5
              }
              dim {
                dim_value: 6
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
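
Since every failure in this family is the same missing opset-6 kernel, another option is to filter the converted-model tests out of the report with the runner's exclude mechanism; the patterns below are illustrative only.

<<<

# Sketch: excluding the opset-6 converted-model tests (illustrative patterns).
back_test.exclude('.*_BatchNorm[123]d_.*')
back_test.exclude('.*_PReLU_.*')
back_test.exclude('.*_GLU_.*')

>>>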
    
    ======================================================================
    ERROR: test_PoissonNLLLLoss_no_reduce_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Mul(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Mul(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        output: "1"
        op_type: "Constant"
        attribute {
          name: "value"
          t {
            dims: 10
            dims: 10
            data_type: 1
            raw_data: "g\253\261\277.\034\211\276G`\000\276Q|\300\277\347G\250\276\203!=?\025\220\313?\360hG\276.\235\214\276\355\242\260\276\351X7\277\354\376\023?)\324\302>R\217-\277u\026\001\276\333\246\326\277\350XM?Z\311\263\276\2209\211\275i\240Z\275\306\337T\277\016\374t?y\r\
    [...]
    it-export"
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 10
              }
              dim {
                dim_value: 10
              }
            }
          }
        }
      }
      output {
        name: "4"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 10
              }
              dim {
                dim_value: 10
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_Softsign_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        output: "1"
        op_type: "Abs"
      }
      node {
        output: "2"
        op_type: "Constant"
        attribute {
          name: "value"
          t {
            data_type: 1
            raw_data: "\000\000\200?"
          }
          type: TENSOR
        }
      }
      node {
        input: "1"
        input: "2"
        output: "3"
        op_type: "Add"
        attribute {
          name: "broadcast"
          i: 1
          type: INT
        }
      }
      node {
        input: "0"
        
    [...]
    
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
      output {
        name: "4"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 5
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_operator_add_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "Add"
        attribute {
          name: "broadcast"
          i: 1
          type: INT
        }
        attribute {
          name: "axis"
          i: 1
          type: INT
        }
      }
      name: "torch-jit-export"
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 11
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      input {
        name: "1"
        type {
          tensor_type {
            elem_type: 11
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "2"
        type {
          tensor_type {
            elem_type: 11
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
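
The ``Add``/``Mul`` dumps above also show the old explicit broadcasting scheme: before opset 7, binary operators carried ``broadcast`` and ``axis`` attributes instead of following numpy-style broadcasting, and onnxruntime only implements the modern kernels. A hedged, illustrative helper (the function name is hypothetical) to flag such nodes in a model:

<<<

# Illustrative only: flag binary nodes that still use the pre-opset-7
# explicit broadcasting attributes visible in the dumps above.
import onnx

def legacy_broadcast_nodes(model):
    "Yields (op_type, attribute_name) for legacy broadcast attributes."
    for node in model.graph.node:
        if node.op_type not in ('Add', 'Sub', 'Mul', 'Div', 'Pow', 'Gemm'):
            continue
        for att in node.attribute:
            if att.name in ('broadcast', 'axis'):
                yield node.op_type, att.name

>>>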
    
    ======================================================================
    ERROR: test_operator_add_size1_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "Add"
        attribute {
          name: "broadcast"
          i: 1
          type: INT
        }
        attribute {
          name: "axis"
          i: 0
          type: INT
        }
      }
      name: "torch-jit-export"
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 11
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value
    [...]
     }
        }
      }
      input {
        name: "1"
        type {
          tensor_type {
            elem_type: 11
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 1
              }
            }
          }
        }
      }
      output {
        name: "2"
        type {
          tensor_type {
            elem_type: 11
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_operator_add_size1_right_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "Add"
        attribute {
          name: "broadcast"
          i: 1
          type: INT
        }
        attribute {
          name: "axis"
          i: 1
          type: INT
        }
      }
      name: "torch-jit-export"
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 11
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      input {
        name: "1"
        type {
          tensor_type {
            elem_type: 11
            shape {
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "2"
        type {
          tensor_type {
            elem_type: 11
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_operator_add_size1_singleton_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "Add"
        attribute {
          name: "broadcast"
          i: 1
          type: INT
        }
        attribute {
          name: "axis"
          i: 0
          type: INT
        }
      }
      name: "torch-jit-export"
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 11
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value
    [...]
     }
        }
      }
      input {
        name: "1"
        type {
          tensor_type {
            elem_type: 11
            shape {
              dim {
                dim_value: 1
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "2"
        type {
          tensor_type {
            elem_type: 11
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_operator_addconstant_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        output: "1"
        op_type: "Constant"
        attribute {
          name: "value"
          t {
            data_type: 11
            raw_data: "\000\000\000\000\000\000\360?"
          }
          type: TENSOR
        }
      }
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "Add"
        attribute {
          name: "broadcast"
          i: 1
          type: INT
        }
      }
      name: "torch-jit-export"
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 11
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
      output {
        name: "2"
        type {
          tensor_type {
            elem_type: 11
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_operator_addmm_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Gemm(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Gemm(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        input: "2"
        output: "3"
        op_type: "Gemm"
        attribute {
          name: "alpha"
          f: 1.0
          type: FLOAT
        }
        attribute {
          name: "beta"
          f: 1.0
          type: FLOAT
        }
        attribute {
          name: "broadcast"
          i: 1
          type: INT
        }
      }
      node {
        input: "0"
        input: "1"
        input: "3"
        output: "4"
        op_type: "Gemm"
        attribute {
          name:
    [...]
     {
                dim_value: 4
              }
            }
          }
        }
      }
      input {
        name: "2"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 4
              }
            }
          }
        }
      }
      output {
        name: "4"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 4
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_operator_basic_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "Add"
      }
      node {
        input: "0"
        input: "2"
        output: "3"
        op_type: "Mul"
      }
      node {
        input: "3"
        output: "4"
        op_type: "Tanh"
      }
      node {
        input: "4"
        output: "5"
        op_type: "Sigmoid"
      }
      node {
        input: "5"
        output: "6"
        op_type: "Neg"
      }
      name: "torch-jit-export"
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 1
              }
            }
          }
        }
      }
      input {
        name: "1"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 1
              }
            }
          }
        }
      }
      output {
        name: "6"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 1
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_operator_mm_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Gemm(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Gemm(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        output: "2"
        op_type: "Constant"
        attribute {
          name: "value"
          t {
            dims: 1
            data_type: 1
            raw_data: "\000\000\000\000"
          }
          type: TENSOR
        }
      }
      node {
        input: "0"
        input: "1"
        input: "2"
        output: "3"
        op_type: "Gemm"
        attribute {
          name: "alpha"
          f: 1.0
          type: FLOAT
        }
        attribute {
          name: "beta"
          f: 0.0
          type: 
    [...]
       }
        }
      }
      input {
        name: "1"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
            }
          }
        }
      }
      output {
        name: "3"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 4
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_operator_non_float_params_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "Add"
      }
      node {
        input: "0"
        input: "2"
        output: "3"
        op_type: "Mul"
      }
      name: "torch-jit-export"
      initializer {
        dims: 2
        dims: 2
        data_type: 7
        name: "1"
        raw_data: "\001\000\000\000\000\000\000\000\002\000\000\000\000\000\000\000\003\000\000\000\000\000\000\000\004\000\000\000\000\000\000\000"
      }
      input {
        name: "0"
      
    [...]
       }
        }
      }
      input {
        name: "1"
        type {
          tensor_type {
            elem_type: 7
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
      output {
        name: "3"
        type {
          tensor_type {
            elem_type: 7
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_operator_params_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Add(6) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "Add"
      }
      node {
        input: "0"
        input: "2"
        output: "3"
        op_type: "Mul"
      }
      node {
        input: "3"
        output: "4"
        op_type: "Tanh"
      }
      node {
        input: "4"
        output: "5"
        op_type: "Sigmoid"
      }
      node {
        input: "5"
        output: "6"
        op_type: "Neg"
      }
      name: "torch-jit-export"
      initializer {
        dims: 2
        dims: 2
        data_typ
    [...]
       }
        }
      }
      input {
        name: "1"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
      output {
        name: "6"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 2
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_operator_pow_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 381, in _create_inference_session
        sess.initialize_session(providers, provider_options, disabled_optimizers)
    onnxruntime.capi.onnxruntime_pybind11_state.NotImplemented: [ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Pow(1) node with name ''
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 9 : NOT_IMPLEMENTED : Could not find an implementation for Pow(1) node with name '''
    ir_version: 3
    producer_name: "pytorch"
    producer_version: "0.3"
    graph {
      node {
        input: "0"
        input: "1"
        output: "2"
        op_type: "Pow"
      }
      name: "torch-jit-export"
      input {
        name: "0"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 1
              }
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
            }
          }
      
    [...]
    
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
            }
          }
        }
      }
      output {
        name: "2"
        type {
          tensor_type {
            elem_type: 1
            shape {
              dim {
                dim_value: 1
              }
              dim {
                dim_value: 2
              }
              dim {
                dim_value: 3
              }
              dim {
                dim_value: 4
              }
            }
          }
        }
      }
    }
    opset_import {
      version: 6
    }
    .
    
    ======================================================================
    ERROR: test_gradient_of_add_and_mul_cpu (__main__.OnnxBackendSimpleModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 372, in _create_inference_session
        sess = C.InferenceSession(session_options, self._model_bytes, False, self._read_config_from_model)
    onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Fatal error: Gradient is not a registered function/op
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 1 : FAIL : Fatal error: Gradient is not a registered function/op'
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "a"
        input: "b"
        output: "c"
        name: "my_add"
        op_type: "Add"
      }
      node {
        input: "c"
        input: "a"
        output: "d"
        name: "my_mul"
        op_type: "Mul"
      }
      node {
        input: "a"
        input: "b"
        output: "dd_da"
        output: "dd_db"
        name: "my_gradient"
        op_type: "Gradient"
        attribute {
          name: "xs"
          strings: "a"
          strings: "b"
          type: STRINGS
        }
        attribute {
          name: "y
    [...]
        }
        }
      }
      output {
        name: "d"
        type {
          tensor_type {
            elem_type: 1
            shape {
            }
          }
        }
      }
      output {
        name: "dd_da"
        type {
          tensor_type {
            elem_type: 1
            shape {
            }
          }
        }
      }
      output {
        name: "dd_db"
        type {
          tensor_type {
            elem_type: 1
            shape {
            }
          }
        }
      }
    }
    opset_import {
      domain: ""
      version: 12
    }
    opset_import {
      domain: "ai.onnx.preview.training"
      version: 1
    }
    .
    
    ======================================================================
    ERROR: test_gradient_of_add_cpu (__main__.OnnxBackendSimpleModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 88, in __init__
        self.sess = InferenceSession(onnx_data, sess_options=sess_options,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 335, in __init__
        self._create_inference_session(providers, provider_options, disabled_optimizers)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/onnxruntime/capi/onnxruntime_inference_collection.py", line 372, in _create_inference_session
        sess = C.InferenceSession(session_options, self._model_bytes, False, self._read_config_from_model)
    onnxruntime.capi.onnxruntime_pybind11_state.Fail: [ONNXRuntimeError] : 1 : FAIL : Fatal error: Gradient is not a registered function/op
    
    The above exception was the direct cause of the following exception:
    
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
        prepared_model = self.backend.prepare(model, device)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
        return cls.prepare(binm, device, **kwargs)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
        inf = cls.create_inference_session(model)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 267, in create_inference_session
        return OnnxInference(model, runtime='onnxruntime1')
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
        self._init(existing_functions)
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 230, in _init
        self._whole = OnnxWholeSession(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_whole/session.py", line 93, in __init__
        raise RuntimeError(
    RuntimeError: Unable to create InferenceSession due to '[ONNXRuntimeError] : 1 : FAIL : Fatal error: Gradient is not a registered function/op'
    ir_version: 7
    producer_name: "backend-test"
    graph {
      node {
        input: "a"
        input: "b"
        output: "c"
        name: "my_add"
        op_type: "Add"
      }
      node {
        input: "a"
        input: "b"
        output: "dc_da"
        output: "dc_db"
        name: "my_gradient"
        op_type: "Gradient"
        attribute {
          name: "xs"
          strings: "a"
          strings: "b"
          type: STRINGS
        }
        attribute {
          name: "y"
          s: "c"
          type: STRING
        }
        domain: "ai.onnx.preview.training"
      }
      name: "Gradi
    [...]
        }
        }
      }
      output {
        name: "c"
        type {
          tensor_type {
            elem_type: 1
            shape {
            }
          }
        }
      }
      output {
        name: "dc_da"
        type {
          tensor_type {
            elem_type: 1
            shape {
            }
          }
        }
      }
      output {
        name: "dc_db"
        type {
          tensor_type {
            elem_type: 1
            shape {
            }
          }
        }
      }
    }
    opset_import {
      domain: ""
      version: 12
    }
    opset_import {
      domain: "ai.onnx.preview.training"
      version: 1
    }
    .
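
Both gradient tests fail for the same reason: `Gradient` belongs to the `ai.onnx.preview.training` domain and, as the error message says, a plain `InferenceSession` has no kernel registered for it. One way to keep the report focused is to exclude these tests through the same pattern mechanism the runner already relies on; a minimal sketch:

<<<

# A minimal sketch: exclude the ai.onnx.preview.training tests, which a plain
# InferenceSession cannot run.
from onnx.backend.test import BackendTest
import mlprodict.onnxrt.backend_ort as backend

bt = BackendTest(backend, __name__)
bt.exclude('.*_gradient_.*')

>>>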
    
    ======================================================================
    FAIL: test_bernoulli_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
        np.testing.assert_allclose(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
        assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Not equal to tolerance rtol=0.001, atol=1e-07
    
    Mismatched elements: 7 / 10 (70%)
    Max absolute difference: 1.
    Max relative difference: 1.
     x: array([1., 1., 0., 1., 0., 0., 1., 0., 0., 1.])
     y: array([0., 1., 1., 0., 0., 1., 0., 1., 1., 1.])
    
    ======================================================================
    FAIL: test_bernoulli_double_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
        np.testing.assert_allclose(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
        assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Not equal to tolerance rtol=0.001, atol=1e-07
    
    Mismatched elements: 4 / 10 (40%)
    Max absolute difference: 1.
    Max relative difference: 1.
     x: array([0., 1., 1., 1., 0., 0., 0., 1., 0., 0.])
     y: array([0., 1., 1., 0., 0., 1., 0., 1., 1., 1.])
    
    ======================================================================
    FAIL: test_bernoulli_double_expanded_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
        np.testing.assert_allclose(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
        assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Not equal to tolerance rtol=0.001, atol=1e-07
    
    Mismatched elements: 6 / 10 (60%)
    Max absolute difference: 1.
    Max relative difference: 1.
     x: array([0., 0., 1., 0., 1., 0., 0., 0., 0., 0.])
     y: array([0., 1., 1., 0., 0., 1., 0., 1., 1., 1.])
    
    ======================================================================
    FAIL: test_bernoulli_expanded_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
        np.testing.assert_allclose(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
        assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Not equal to tolerance rtol=0.001, atol=1e-07
    
    Mismatched elements: 7 / 10 (70%)
    Max absolute difference: 1.
    Max relative difference: 1.
     x: array([1., 0., 0., 0., 0., 0., 1., 0., 0., 1.])
     y: array([0., 1., 1., 0., 0., 1., 0., 1., 1., 1.])
    
    ======================================================================
    FAIL: test_bernoulli_seed_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
        np.testing.assert_allclose(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
        assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Not equal to tolerance rtol=0.001, atol=1e-07
    
    Mismatched elements: 5 / 10 (50%)
    Max absolute difference: 1.
    Max relative difference: 1.
     x: array([0., 0., 1., 0., 1., 0., 0., 0., 0., 1.], dtype=float32)
     y: array([0., 1., 1., 0., 0., 1., 0., 1., 1., 1.], dtype=float32)
    
    ======================================================================
    FAIL: test_bernoulli_seed_expanded_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
        np.testing.assert_allclose(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
        assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Not equal to tolerance rtol=0.001, atol=1e-07
    
    Mismatched elements: 5 / 10 (50%)
    Max absolute difference: 1.
    Max relative difference: 1.
     x: array([0., 0., 1., 0., 1., 0., 0., 0., 0., 1.], dtype=float32)
     y: array([0., 1., 1., 0., 0., 1., 0., 1., 1., 1.], dtype=float32)
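
The six `test_bernoulli_*` failures are expected rather than alarming: `Bernoulli` is a stochastic operator, so its output depends on the runtime's random generator, and even the `seed` attribute does not pin that generator across implementations. Comparing elementwise against a fixed reference therefore fails by construction; a statistical check is fairer. A minimal sketch, using a numpy draw as a stand-in for the runtime's output:

<<<

import numpy as np

# A minimal sketch of a statistical check for a stochastic operator: every
# element must be 0 or 1 and the empirical mean should track the input
# probability. The draw below stands in for the runtime's actual output.
rng = np.random.default_rng(0)
p = np.full(10_000, 0.5)
out = (rng.random(p.shape) < p).astype(np.float64)
assert set(np.unique(out)) <= {0.0, 1.0}
assert abs(out.mean() - p.mean()) < 0.02

>>>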
    
    ======================================================================
    FAIL: test_cast_FLOAT_to_STRING_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 189, in assert_similar_outputs
        np.testing.assert_array_equal(outputs[i], ref_outputs[i])
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 934, in assert_array_equal
        assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Arrays are not equal
    
    Mismatched elements: 3 / 12 (25%)
     x: array([['0.9767611', '0.60484552', '0.73926359', '0.039187793'],
           ['0.28280696', '0.12019656', '0.29614019', '0.11872772'],
           ['0.31798318', '0.41426298', '0.064147495', '0.6924721']],
          dtype=object)
     y: array([['0.9767611', '0.6048455', '0.7392636', '0.039187793'],
           ['0.28280696', '0.12019656', '0.2961402', '0.11872772'],
           ['0.31798318', '0.41426298', '0.064147495', '0.6924721']],
          dtype=object)
    
    ======================================================================
    FAIL: test_castlike_FLOAT_to_STRING_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 189, in assert_similar_outputs
        np.testing.assert_array_equal(outputs[i], ref_outputs[i])
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 934, in assert_array_equal
        assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Arrays are not equal
    
    Mismatched elements: 3 / 12 (25%)
     x: array([['0.9767611', '0.60484552', '0.73926359', '0.039187793'],
           ['0.28280696', '0.12019656', '0.29614019', '0.11872772'],
           ['0.31798318', '0.41426298', '0.064147495', '0.6924721']],
          dtype=object)
     y: array([['0.9767611', '0.6048455', '0.7392636', '0.039187793'],
           ['0.28280696', '0.12019656', '0.2961402', '0.11872772'],
           ['0.31798318', '0.41426298', '0.064147495', '0.6924721']],
          dtype=object)
    
    ======================================================================
    FAIL: test_castlike_FLOAT_to_STRING_expanded_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 189, in assert_similar_outputs
        np.testing.assert_array_equal(outputs[i], ref_outputs[i])
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 934, in assert_array_equal
        assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Arrays are not equal
    
    Mismatched elements: 3 / 12 (25%)
     x: array([['0.9767611', '0.60484552', '0.73926359', '0.039187793'],
           ['0.28280696', '0.12019656', '0.29614019', '0.11872772'],
           ['0.31798318', '0.41426298', '0.064147495', '0.6924721']],
          dtype=object)
     y: array([['0.9767611', '0.6048455', '0.7392636', '0.039187793'],
           ['0.28280696', '0.12019656', '0.2961402', '0.11872772'],
           ['0.31798318', '0.41426298', '0.064147495', '0.6924721']],
          dtype=object)
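
The three `*FLOAT_to_STRING*` failures are formatting mismatches, not numerical ones: `'0.60484552'` and `'0.6048455'` are the same float32 value printed with a different number of significant digits. Parsing both sides back to float32 shows they agree; a minimal sketch with values taken from the log above:

<<<

import numpy as np

# The mismatched strings from the log round-trip to identical float32 values.
x = np.array(['0.60484552', '0.73926359', '0.29614019'], dtype=object)
y = np.array(['0.6048455', '0.7392636', '0.2961402'], dtype=object)
np.testing.assert_array_equal(x.astype(np.float32), y.astype(np.float32))

>>>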
    
    ======================================================================
    FAIL: test_convtranspose_autopad_same_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
        np.testing.assert_allclose(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
        assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Not equal to tolerance rtol=0.001, atol=1e-07
    
    Mismatched elements: 60 / 72 (83.3%)
    Max absolute difference: 20.
    Max relative difference: 11.
     x: array([[[[ 0.,  1.,  1.,  3.,  2.,  2.],
             [ 3.,  8.,  5., 12.,  7.,  7.],
             [ 3.,  7.,  4.,  9.,  5.,  5.],...
     y: array([[[[ 0.,  0.,  1.,  1.,  3.,  2.],
             [ 0.,  0.,  1.,  1.,  3.,  2.],
             [ 3.,  3.,  8.,  5., 12.,  7.],...
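
The `ConvTranspose` diff shows the same values shifted by one pixel: the produced output places the odd padding row/column at one side, the reference at the other, i.e. the two sides disagree on where the leftover pixel of `auto_pad=SAME_UPPER` goes. Per the ONNX specification the output size is `input * stride` and the leftover padding is split between begin and end; a minimal sketch of that arithmetic, with the 3 to 6, 3x3-kernel, stride-2 shapes inferred from the log:

<<<

# Padding arithmetic for ConvTranspose with auto_pad=SAME_UPPER (per axis);
# shapes are inferred from the log (3 -> 6 with a 3x3 kernel, stride 2).
in_size, kernel, stride, dilation = 3, 3, 2, 1
out_size = in_size * stride                                                  # 6
total_pad = stride * (in_size - 1) + (kernel - 1) * dilation + 1 - out_size  # 1
begin, end = total_pad // 2, total_pad - total_pad // 2                      # (0, 1)
print(out_size, (begin, end))

>>>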
    
    ======================================================================
    FAIL: test_maxunpool_export_with_output_shape_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
        np.testing.assert_allclose(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
        assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Not equal to tolerance rtol=0.001, atol=1e-07
    
    Mismatched elements: 8 / 25 (32%)
    Max absolute difference: 8.
    Max relative difference: 1.
     x: array([[[[0., 0., 0., 0., 0.],
             [5., 0., 6., 0., 0.],
             [0., 0., 0., 7., 0.],...
     y: array([[[[0., 0., 0., 0., 0.],
             [0., 5., 0., 6., 0.],
             [0., 0., 0., 0., 0.],...
    
    ======================================================================
    FAIL: test_resize_downsample_scales_cubic_align_corners_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
        np.testing.assert_allclose(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
        assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Not equal to tolerance rtol=0.001, atol=1e-07
    
    Mismatched elements: 8 / 9 (88.9%)
    Max absolute difference: 1.048
    Max relative difference: 0.07
     x: array([[[[ 1. ,  2.5,  4. ],
             [ 7. ,  8.5, 10. ],
             [13. , 14.5, 16. ]]]], dtype=float32)
     y: array([[[[ 1.      ,  2.395192,  3.790383],
             [ 6.580766,  7.975958,  9.371149],
             [12.161532, 13.556724, 14.951916]]]], dtype=float32)
    
    ======================================================================
    FAIL: test_resize_downsample_scales_linear_align_corners_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
        np.testing.assert_allclose(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
        assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Not equal to tolerance rtol=0.001, atol=1e-07
    
    Mismatched elements: 1 / 2 (50%)
    Max absolute difference: 0.857
    Max relative difference: 0.273
     x: array([[[[1., 4.]]]], dtype=float32)
     y: array([[[[1.      , 3.142857]]]], dtype=float32)
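
In both `resize` failures the produced output matches the plain `align_corners` coordinate mapping, `x_orig = x_out * (len_in - 1) / (len_out - 1)`, while the reference expects a different result when downsampling. For the linear test the produced `[1., 4.]` falls straight out of that formula; a minimal sketch:

<<<

import numpy as np

# The plain align_corners mapping reproduces the produced output [1., 4.]
# for the linear downsampling test (input [1, 2, 3, 4], output length 2).
data = np.array([1., 2., 3., 4.], dtype=np.float32)
len_out = 2
coords = np.arange(len_out) * (len(data) - 1) / (len_out - 1)  # [0., 3.]
print(np.interp(coords, np.arange(len(data)), data))           # [1. 4.]

>>>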
    
    ======================================================================
    FAIL: test_training_dropout_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
        np.testing.assert_allclose(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
        assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Not equal to tolerance rtol=0.001, atol=1e-07
    
    Mismatched elements: 23 / 60 (38.3%)
    Max absolute difference: 10.212
    Max relative difference: 1.
     x: array([[[ 0.      ,  0.      ,  3.914952,  0.      ,  0.      ],
            [-0.      ,  0.      , -0.      , -0.      ,  1.642394],
            [ 0.      ,  0.      ,  3.044151,  0.      ,  0.      ],...
     y: array([[[  0.      ,   0.      ,   0.      ,   0.      ,   0.      ],
            [ -0.      ,   0.      ,  -0.605429,  -0.412875,   0.      ],
            [  0.576174,   0.      ,   0.      ,   0.4867  ,   0.      ],...
    
    ======================================================================
    FAIL: test_training_dropout_default_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
        np.testing.assert_allclose(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
        assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Not equal to tolerance rtol=0.001, atol=1e-07
    
    Mismatched elements: 32 / 60 (53.3%)
    Max absolute difference: 5.106
    Max relative difference: 1.
     x: array([[[ 0.      ,  0.      ,  1.957476,  0.      ,  3.735116],
            [-0.      ,  0.      , -0.302714, -0.206438,  0.821197],
            [ 0.      ,  2.908547,  1.522075,  0.      ,  0.      ],...
     y: array([[[ 3.528105,  0.800314,  1.957476,  4.481786,  0.      ],
            [-1.954556,  0.      , -0.302714, -0.206438,  0.      ],
            [ 0.288087,  2.908547,  1.522075,  0.24335 ,  0.      ],...
    
    ======================================================================
    FAIL: test_training_dropout_default_mask_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
        np.testing.assert_allclose(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
        assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Not equal to tolerance rtol=0.001, atol=1e-07
    
    Mismatched elements: 32 / 60 (53.3%)
    Max absolute difference: 5.106
    Max relative difference: 1.
     x: array([[[ 0.      ,  0.      ,  1.957476,  0.      ,  3.735116],
            [-0.      ,  0.      , -0.302714, -0.206438,  0.821197],
            [ 0.      ,  2.908547,  1.522075,  0.      ,  0.      ],...
     y: array([[[ 3.528105,  0.800314,  1.957476,  4.481786,  0.      ],
            [-1.954556,  0.      , -0.302714, -0.206438,  0.      ],
            [ 0.288087,  2.908547,  1.522075,  0.24335 ,  0.      ],...
    
    ======================================================================
    FAIL: test_training_dropout_mask_cpu (__main__.OnnxBackendNodeModelTest)
    ----------------------------------------------------------------------
    Traceback (most recent call last):
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
        return test_func(*args, device=device, **kwargs)
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
        self.assert_similar_outputs(ref_outputs, outputs,
      File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
        np.testing.assert_allclose(
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
        assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
      File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
        raise AssertionError(msg)
    AssertionError: 
    Not equal to tolerance rtol=0.001, atol=1e-07
    
    Mismatched elements: 23 / 60 (38.3%)
    Max absolute difference: 10.212
    Max relative difference: 1.
     x: array([[[ 0.      ,  0.      ,  3.914952,  0.      ,  0.      ],
            [-0.      ,  0.      , -0.      , -0.      ,  1.642394],
            [ 0.      ,  0.      ,  3.044151,  0.      ,  0.      ],...
     y: array([[[  0.      ,   0.      ,   0.      ,   0.      ,   0.      ],
            [ -0.      ,   0.      ,  -0.605429,  -0.412875,   0.      ],
            [  0.576174,   0.      ,   0.      ,   0.4867  ,   0.      ],...
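
The four `test_training_dropout*` failures are the same story as `Bernoulli`: in training mode `Dropout` draws a random mask, and the `seed` attribute does not make that mask reproducible across implementations, so an elementwise comparison against a fixed reference cannot succeed. What can be checked deterministically is the scaling: every kept element equals `x / (1 - ratio)`. A minimal sketch with a stand-in mask (the ratio 0.75 is inferred from the 4x scaling visible in the log):

<<<

import numpy as np

# Deterministic property of training-mode Dropout: kept elements are scaled
# by 1 / (1 - ratio). The mask below stands in for the runtime's random mask.
rng = np.random.default_rng(0)
x = rng.normal(size=60).astype(np.float32)
ratio = 0.75
mask = rng.random(x.shape) >= ratio
y = np.where(mask, x / (1 - ratio), 0.0).astype(np.float32)
np.testing.assert_allclose(y[mask], x[mask] / (1 - ratio))

>>>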
    
    ----------------------------------------------------------------------
    Ran 2026 tests in 45.718s
    
    FAILED (failures=17, errors=83, skipped=1021)
    [runpythonerror]
    2022-04-05 06:13:48.135976886 [E:onnxruntime:, inference_session.cc:1587 operator()] Exception during initialization: /onnxruntime_src/onnxruntime/core/providers/cpu/rnn/deep_cpu_gru.h:54 onnxruntime::DeepCpuGruOp::DeepCpuGruOp(const onnxruntime::OpKernelInfo&) layout_ == 0 was false. Batchwise recurrent operations (layout == 1) are not supported. If you need support create a github issue with justification.
    
    2022-04-05 06:13:49.168152854 [W:onnxruntime:, constant_folding.cc:202 ApplyImpl] Unsupported output type of N11onnxruntime22SequenceTensorTypeBaseE. Can't constant fold SequenceConstruct node ''
    (the same SequenceConstruct constant-folding warning is emitted three more times; duplicates elided)
    2022-04-05 06:13:50.245166435 [E:onnxruntime:, inference_session.cc:1587 operator()] Exception during initialization: /onnxruntime_src/onnxruntime/core/providers/cpu/rnn/lstm_base.h:51 onnxruntime::LSTMBase::LSTMBase(const onnxruntime::OpKernelInfo&) layout_ == 0 was false. Batchwise recurrent operations (layout == 1) are not supported. If you need support create a github issue with justification.
    
    2022-04-05 06:13:57.438555907 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:13:57.439046462 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:13:57.439147791 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:06.223286076 [E:onnxruntime:, inference_session.cc:1587 operator()] Exception during initialization: /onnxruntime_src/onnxruntime/core/providers/cpu/rnn/rnn.h:44 onnxruntime::RNN<T>::RNN(const onnxruntime::OpKernelInfo&) [with T = float] layout_ == 0 was false. Batchwise recurrent operations (layout == 1) are not supported. If you need support create a github issue with justification.
    
    [... the same pair of warnings (model.cc:163 "opset version 7 or above" and ort_transpose_optimizer.cc:24 "Transpose optimizer failed: Unsupported ONNX opset") repeats for every remaining opset-6 model; duplicates elided ...]
    2022-04-05 06:14:12.021375587 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.021499605 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.029205986 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.029825349 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.029927058 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.037616048 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.038193712 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.038291251 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.046006311 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.046575455 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.046669854 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.054204907 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.054751071 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.054846920 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.061155824 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.061669109 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.061764808 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.068089512 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.068586407 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.068681696 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.075140669 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.075636214 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.075731803 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.082152457 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.082737421 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.082952518 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.240603313 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.241185797 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.241400055 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.408308584 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.408773369 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.408864808 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.415001785 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.415463410 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.415554729 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.422628106 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.423147000 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.423292669 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.592833581 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.593587443 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.593667202 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.593751531 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.599898967 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.600355002 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.600445491 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.606871125 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.607353220 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.607443049 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.613861923 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:12.614343508 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:12.614432887 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.007368232 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.007898946 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.007992705 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.056573371 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.057129606 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.057225785 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.064479210 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.065047004 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.065145823 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.071787984 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.072328008 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.072426907 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.079007439 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.079529193 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.079645632 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.152567606 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.153079940 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.153198069 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.216610352 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.217126656 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.217243535 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.268652492 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.269173437 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.269291805 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.294789411 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.295310236 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.295427424 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.300478912 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.301000307 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.301118016 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.307108253 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.307811316 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.307972384 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.315493696 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.316132520 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.316419157 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.340258069 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.340744894 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.340838053 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.347179828 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.347689433 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.347781212 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.354318264 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.354816689 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.354908318 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.360950095 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.361450530 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.361546899 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.367550507 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.368024062 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.368115961 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.374233687 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.374706782 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.374797701 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.381001137 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.381548901 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.381666140 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.387866176 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.388323401 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.388411780 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.395242969 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.395844703 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.396112760 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.496169323 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.496640478 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.496738777 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.503155640 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.503645825 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.503743144 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.509893410 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.510369586 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.510469284 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.516606901 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.517074136 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.517170425 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.523350021 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.523819506 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.523916015 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.530132061 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.530596916 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.530691635 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.537133958 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.537661242 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.537831651 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.680572390 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:13.681058225 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:13.681223903 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.138907447 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.139447362 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.139625550 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.144702267 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.145229832 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.145406720 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.150582756 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.151135080 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.151313419 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.157030419 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.157734442 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.157960940 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.163903218 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.164642160 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.165010396 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.171410530 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.171957404 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.172061443 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.179458617 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.179992611 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.180098460 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.186511114 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.187059988 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.187164737 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.195310712 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.195927406 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.196033265 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.300193225 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.300795468 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.300905587 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.307123013 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.307618817 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.307717136 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.313833033 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.314346608 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.314447337 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.321027969 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.321642982 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.321772411 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.328361222 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.328855807 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.328954986 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.335538918 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.336053553 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.336153052 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.343173789 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.343664034 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.343762453 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.350876469 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.351428543 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.351586372 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.468665227 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.469219662 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.469529198 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.588456825 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.588951790 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.589050079 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.596006357 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.596679010 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.597004697 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.708496210 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.708990515 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.709089794 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.715406119 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.715893474 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.716019682 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.828431397 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.828914762 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.829012651 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.835271506 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.835753911 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.835853640 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.842164834 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.842646349 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.842741268 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.849029063 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.849537288 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.849634207 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.856076910 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.856576995 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.856676714 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.863652261 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.864228435 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.864365384 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.870753988 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.871215153 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.871310762 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.877230471 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:14.877725755 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:14.877824594 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:15.091627607 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:15.092195831 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:15.092302590 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:15.100864721 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:15.101569354 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:15.101725683 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:15.111193264 [W:onnxruntime:, model.cc:163 Model] ONNX Runtime only *guarantees* support for models stamped with opset version 7 or above for opset domain 'ai.onnx'. Please upgrade your model to opset 7 or higher. For now, this opset 6 model may run depending upon legacy support of some older opset version operators.
    2022-04-05 06:14:15.111720549 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
    2022-04-05 06:14:15.111822738 [W:onnxruntime:, ort_transpose_optimizer.cc:24 ApplyImpl] Transpose optimizer failed: Unsupported ONNX opset
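The warning pair above is emitted once per test model stamped with opset 6: onnxruntime loads such models through a legacy path and its transpose optimizer skips them. When the noise matters more than the legacy coverage, a model can be bumped to a supported opset with onnx.version_converter before the session is created. A minimal sketch, assuming a hypothetical model file model_opset6.onnx:

<<<

import onnx
from onnx import version_converter

# Load the old model (hypothetical file name, for illustration only).
model = onnx.load('model_opset6.onnx')

# Rewrite the graph so it is stamped with opset 7, the lowest version
# onnxruntime guarantees; operators whose definitions changed are converted.
converted = version_converter.convert_version(model, 7)
onnx.save(converted, 'model_opset7.onnx')

>>>

Alternatively, the warnings can simply be silenced for a run: onnxruntime's SessionOptions exposes a log_severity_level attribute, and setting it to 3 keeps only errors and above.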
    2022-04-05 06:14:19.675716978 [W:onnxruntime:, graph.cc:1271 Graph] Initializer pos appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.
    2022-04-05 06:14:19.675897946 [W:onnxruntime:, graph.cc:1271 Graph] Initializer pos_at appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.
    2022-04-05 06:14:19.676632708 [W:onnxruntime:, constant_folding.cc:202 ApplyImpl] Unsupported output type of N11onnxruntime22SequenceTensorTypeBaseE. Can't constant fold SequenceEmpty node ''
    2022-04-05 06:14:19.676986035 [W:onnxruntime:, constant_folding.cc:202 ApplyImpl] Unsupported output type of N11onnxruntime22SequenceTensorTypeBaseE. Can't constant fold SequenceEmpty node ''
    2022-04-05 06:14:19.707916254 [W:onnxruntime:, graph.cc:1271 Graph] Initializer pos_erase appears in graph inputs and will not be treated as constant value/weight. This may prevent some of the graph optimizations, like const folding. Move it out of graph inputs if there is no need to override it, by either re-generating the model with latest exporter/converter or with the tool onnxruntime/tools/python/remove_initializer_from_input.py.
    [... the same graph.cc:1271 warning repeats for the initializers pos_at, pos_erase and pos_insert in the following tests; the duplicated lines are elided ...]
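These graph.cc:1271 warnings flag test models that list initializers (pos, pos_at, pos_erase, pos_insert) among the graph inputs, which prevents optimizations such as constant folding. The message points at onnxruntime's remove_initializer_from_input.py tool; the same cleanup takes a few lines of onnx. A minimal sketch, assuming a hypothetical model file model.onnx:

<<<

import onnx

model = onnx.load('model.onnx')  # hypothetical file name

# Names of all initializers stored in the graph.
init_names = {init.name for init in model.graph.initializer}

# Keep only the true inputs, i.e. those not shadowed by an initializer.
real_inputs = [inp for inp in model.graph.input
               if inp.name not in init_names]
del model.graph.input[:]
model.graph.input.extend(real_inputs)

onnx.save(model, 'model.clean.onnx')

>>>

After this rewrite the optimizer can treat those initializers as constants, so constant folding applies again; the trade-off is that they can no longer be overridden at inference time.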