ONNX Backends for Python/Numpy runtime (compiled)
Backend class: OnnxInferenceBackend

The following script runs the standard ONNX backend test suite against this backend, the compiled version of the Python/Numpy runtime. It prints the versions involved, executes every matching test, and summarizes the results; the final ratio is the share of executed (non-skipped) tests which ran without error: ratio = 1 - errors / (testsRun - skipped).
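The module follows the standard ONNX backend API, so a single model can also be run outside the test suite with prepare followed by run. A minimal sketch, assuming a file model.onnx with one float input; the file name, device string and input shape are illustrative, not taken from the run below:

<<<
import numpy
from onnx import load
import mlprodict.onnxrt.backend_pyc as backend

# 'model.onnx' is a placeholder for any ONNX file.
model = load('model.onnx')

# prepare() builds a representation of the model for the compiled
# Python/Numpy runtime; run() executes it on a list of inputs.
rep = backend.prepare(model, 'CPU')
x = numpy.random.randn(3, 4).astype(numpy.float32)
outputs = rep.run([x])
print(outputs)
>>>

The same prepare function is what BackendTest calls for every test case reported below.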
<<<
import unittest
import sys
from datetime import datetime
from contextlib import redirect_stdout, redirect_stderr
from io import StringIO
from onnx.backend.test import BackendTest
from onnx import __version__ as onnx_version
from onnxruntime import __version__ as ort_version
from numpy import __version__ as npy_version
import mlprodict.onnxrt.backend_pyc as backend
back_test = BackendTest(backend, __name__)
back_test.include('.*_cpu')
# Exclude tests relying on pretrained deep learning models,
# too heavy to run as unit tests.
back_test.exclude('.*_blvc_.*')
back_test.exclude('.*_densenet_.*')
back_test.exclude('.*_densenet121_.*')
back_test.exclude('.*_inception_.*')
back_test.exclude('.*_resnet50_.*')
back_test.exclude('.*_shufflenet_.*')
back_test.exclude('.*_squeezenet_.*')
back_test.exclude('.*_vgg19_.*')
back_test.exclude('.*_zfnet512_.*')
# Expose the generated test cases so that unittest.main finds them.
globals().update(back_test.enable_report().test_cases)
print('---------------------------------')
print('python', sys.version)
print('onnx', onnx_version)
print('onnxruntime', ort_version)
print('numpy', npy_version)
print('---------------------------------')
print(datetime.now(), "BEGIN")
print('---------------------------------')
# Capture the verbose test output so it can be filtered afterwards.
buffer = StringIO()
with redirect_stdout(buffer):
    with redirect_stderr(buffer):
        res = unittest.main(verbosity=2, exit=False)
testsRun = res.result.testsRun
errors = len(res.result.errors)
skipped = len(res.result.skipped)
unexpectedSuccesses = len(res.result.unexpectedSuccesses)
expectedFailures = len(res.result.expectedFailures)
print('---------------------------------')
print(datetime.now(), "END")
print('---------------------------------')
print("testsRun=%d errors=%d skipped=%d" % (testsRun, errors, skipped))
print("unexpectedSuccesses=%d expectedFailures=%d" % (
unexpectedSuccesses, expectedFailures))
ran = testsRun - skipped
# Share of executed (non-skipped) tests which ran without error.
print("ratio=%f" % (1 - errors * 1.0 / ran))
print('---------------------------------')
# Drop the noisy lines produced for tests rejected by the include pattern.
lines = buffer.getvalue().split('\n')
print("\n".join(line for line in lines
                if "skipped 'no matched include pattern'" not in line))
>>>
---------------------------------
python 3.9.1 (default, Jan 18 2021, 16:35:58)
[GCC 8.3.0]
onnx 1.11.0
onnxruntime 1.11.0
numpy 1.21.5
---------------------------------
2022-04-05 07:15:28.466790 BEGIN
---------------------------------
---------------------------------
2022-04-05 07:15:57.331689 END
---------------------------------
testsRun=2026 errors=303 skipped=1021
unexpectedSuccesses=0 expectedFailures=0
ratio=0.698507
---------------------------------
test_abs_cpu (__main__.OnnxBackendNodeModelTest) ... /var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/npy/xop.py:16: DeprecationWarning: Please use `coo_matrix` from the `scipy.sparse` namespace, the `scipy.sparse.coo` namespace is deprecated.
from scipy.sparse.coo import coo_matrix
/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/_op_numpy_helper.py:8: DeprecationWarning: Please use `coo_matrix` from the `scipy.sparse` namespace, the `scipy.sparse.coo` namespace is deprecated.
from scipy.sparse.coo import coo_matrix
/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py:188: DeprecationWarning: `np.object` is a deprecated alias for the builtin `object`. To silence this warning, use `object` by itself. Doing this will not modify any behavior and is safe.
Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
if ref_outputs[i].dtype == np.object:
ok
test_acos_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_acos_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_acosh_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_acosh_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_adagrad_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_adagrad_multiple_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_adam_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_adam_multiple_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_add_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_add_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_add_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_and2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_and3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_and4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_and_bcast3v1d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_and_bcast3v2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_and_bcast4v2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_and_bcast4v3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_and_bcast4v4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_default_axis_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_default_axis_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_default_axis_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_default_axis_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_keepdims_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_keepdims_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_negative_axis_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_negative_axis_keepdims_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_negative_axis_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_negative_axis_keepdims_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_no_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_no_keepdims_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_no_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmax_no_keepdims_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_default_axis_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_default_axis_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_default_axis_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_default_axis_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_keepdims_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_keepdims_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_negative_axis_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_negative_axis_keepdims_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_negative_axis_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_negative_axis_keepdims_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_no_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_no_keepdims_example_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_no_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_argmin_no_keepdims_random_select_last_index_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_asin_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_asin_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_asinh_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_asinh_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_atan_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_atan_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_atanh_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_atanh_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_averagepool_1d_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_averagepool_2d_ceil_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_averagepool_2d_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_averagepool_2d_pads_count_include_pad_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_averagepool_2d_pads_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_averagepool_2d_precomputed_pads_count_include_pad_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_averagepool_2d_precomputed_pads_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_averagepool_2d_precomputed_same_upper_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_averagepool_2d_precomputed_strides_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_averagepool_2d_same_lower_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_averagepool_2d_same_upper_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_averagepool_2d_strides_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_averagepool_3d_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_basic_conv_with_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_basic_conv_without_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_basic_convinteger_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_batchnorm_epsilon_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_batchnorm_epsilon_training_mode_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_batchnorm_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_batchnorm_example_training_mode_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_bernoulli_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_bernoulli_double_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_bernoulli_double_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_bernoulli_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_bernoulli_seed_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_bernoulli_seed_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_bitshift_left_uint16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_bitshift_left_uint32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_bitshift_left_uint64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_bitshift_left_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_bitshift_right_uint16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_bitshift_right_uint32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_bitshift_right_uint64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_bitshift_right_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cast_BFLOAT16_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_cast_DOUBLE_to_FLOAT16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cast_DOUBLE_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cast_FLOAT16_to_DOUBLE_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cast_FLOAT16_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cast_FLOAT_to_BFLOAT16_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_cast_FLOAT_to_DOUBLE_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cast_FLOAT_to_FLOAT16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cast_FLOAT_to_STRING_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_cast_STRING_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_castlike_BFLOAT16_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_castlike_BFLOAT16_to_FLOAT_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_castlike_DOUBLE_to_FLOAT16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_castlike_DOUBLE_to_FLOAT16_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_castlike_DOUBLE_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_castlike_DOUBLE_to_FLOAT_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_castlike_FLOAT16_to_DOUBLE_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_castlike_FLOAT16_to_DOUBLE_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_castlike_FLOAT16_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_castlike_FLOAT16_to_FLOAT_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_castlike_FLOAT_to_BFLOAT16_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_castlike_FLOAT_to_BFLOAT16_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_castlike_FLOAT_to_DOUBLE_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_castlike_FLOAT_to_DOUBLE_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_castlike_FLOAT_to_FLOAT16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_castlike_FLOAT_to_FLOAT16_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_castlike_FLOAT_to_STRING_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_castlike_FLOAT_to_STRING_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_castlike_STRING_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_castlike_STRING_to_FLOAT_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_ceil_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_ceil_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_celu_cpu (__main__.OnnxBackendNodeModelTest) ... /var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_celu.py:47: DeprecationWarning: `np.float` is a deprecated alias for the builtin `float`. To silence this warning, use `float` by itself. Doing this will not modify any behavior and is safe. If you specifically wanted the numpy scalar type, use `np.float64` here.
Deprecated in NumPy 1.20; for more details and guidance: https://numpy.org/devdocs/release/1.20.0-notes.html#deprecations
lambda x: pycelu(x, self.alpha), otypes=[numpy.float])
ok
test_celu_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_clip_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_clip_default_inbounds_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_clip_default_int8_inbounds_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_clip_default_int8_max_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_clip_default_int8_min_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_clip_default_max_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_clip_default_min_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_clip_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_clip_inbounds_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_clip_outbounds_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_clip_splitbounds_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_compress_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_compress_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_compress_default_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_compress_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_concat_1d_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_concat_1d_axis_negative_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_concat_2d_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_concat_2d_axis_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_concat_2d_axis_negative_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_concat_2d_axis_negative_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_concat_3d_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_concat_3d_axis_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_concat_3d_axis_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_concat_3d_axis_negative_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_concat_3d_axis_negative_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_concat_3d_axis_negative_3_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_constant_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_constant_pad_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_constantofshape_float_ones_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_constantofshape_int_shape_zero_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_constantofshape_int_zeros_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_conv_with_autopad_same_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_conv_with_strides_and_asymmetric_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_conv_with_strides_no_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_conv_with_strides_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_convinteger_with_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_convinteger_without_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_convtranspose_1d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_convtranspose_3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_convtranspose_autopad_same_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_convtranspose_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_convtranspose_dilations_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_convtranspose_kernel_shape_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_convtranspose_output_shape_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_convtranspose_pad_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_convtranspose_pads_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_convtranspose_with_kernel_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cos_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cos_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cosh_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cosh_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cumsum_1d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cumsum_1d_exclusive_cpu (__main__.OnnxBackendNodeModelTest) ... /var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_cum_sum.py:50: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
numpy.cumsum(x[indices_c], axis=axis, out=res[indices_d])
ok
test_cumsum_1d_reverse_cpu (__main__.OnnxBackendNodeModelTest) ... /var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_cum_sum.py:43: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
x = x[rev_indices]
/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_cum_sum.py:57: FutureWarning: Using a non-tuple sequence for multidimensional indexing is deprecated; use `arr[tuple(seq)]` instead of `arr[seq]`. In the future this will be interpreted as an array index, `arr[np.array(seq)]`, which will result either in an error or a different result.
res = res[rev_indices]
ok
test_cumsum_1d_reverse_exclusive_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cumsum_2d_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cumsum_2d_axis_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_cumsum_2d_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_depthtospace_crd_mode_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_depthtospace_crd_mode_example_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_depthtospace_dcr_mode_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_depthtospace_example_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_dequantizelinear_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_dequantizelinear_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_det_2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_det_nd_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_div_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_div_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_div_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_div_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_dropout_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_dropout_default_mask_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_dropout_default_mask_ratio_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_dropout_default_old_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_dropout_default_ratio_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_dropout_random_old_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_dynamicquantizelinear_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_dynamicquantizelinear_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_dynamicquantizelinear_max_adjusted_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_dynamicquantizelinear_max_adjusted_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_dynamicquantizelinear_min_adjusted_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_dynamicquantizelinear_min_adjusted_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_edge_pad_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_einsum_batch_diagonal_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_einsum_batch_matmul_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_einsum_inner_prod_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_einsum_sum_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_einsum_transpose_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_elu_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_elu_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_elu_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_equal_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_equal_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_erf_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_exp_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_exp_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_expand_dim_changed_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_expand_dim_unchanged_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_eyelike_populate_off_main_diagonal_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_eyelike_with_dtype_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_eyelike_without_dtype_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_flatten_axis0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_flatten_axis1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_flatten_axis2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_flatten_axis3_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_flatten_default_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_flatten_negative_axis1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_flatten_negative_axis2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_flatten_negative_axis3_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_flatten_negative_axis4_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_floor_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_floor_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gather_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gather_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gather_2d_indices_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gather_elements_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gather_elements_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gather_elements_negative_indices_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gather_negative_indices_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gathernd_example_float32_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_gathernd_example_int32_batch_dim1_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_gathernd_example_int32_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_gemm_all_attributes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gemm_alpha_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gemm_beta_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gemm_default_matrix_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gemm_default_no_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gemm_default_scalar_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gemm_default_single_elem_vector_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gemm_default_vector_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gemm_default_zero_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gemm_transposeA_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gemm_transposeB_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_globalaveragepool_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_globalaveragepool_precomputed_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_globalmaxpool_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_globalmaxpool_precomputed_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_greater_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_greater_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_greater_equal_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_greater_equal_bcast_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_greater_equal_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_greater_equal_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_gridsample_aligncorners_true_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_gridsample_bicubic_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_gridsample_bilinear_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_gridsample_border_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_gridsample_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_gridsample_nearest_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_gridsample_reflection_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_gridsample_zeros_padding_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_gru_batchwise_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_gru_defaults_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_gru_seq_length_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_gru_with_initial_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_hardmax_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_hardmax_axis_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_hardmax_axis_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_hardmax_default_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_hardmax_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_hardmax_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_hardmax_one_hot_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_hardsigmoid_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_hardsigmoid_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_hardsigmoid_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_hardswish_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_hardswish_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_identity_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_identity_opt_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_identity_sequence_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_if_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_if_opt_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_if_seq_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_instancenorm_epsilon_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_instancenorm_example_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_isinf_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_isinf_negative_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_isinf_positive_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_isnan_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_leakyrelu_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_leakyrelu_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_leakyrelu_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_less_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_less_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_less_equal_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_less_equal_bcast_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_less_equal_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_less_equal_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_log_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_log_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_logsoftmax_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_logsoftmax_axis_0_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_logsoftmax_axis_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_logsoftmax_axis_1_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_logsoftmax_axis_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_logsoftmax_axis_2_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_logsoftmax_default_axis_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_logsoftmax_default_axis_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_logsoftmax_example_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_logsoftmax_example_1_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_logsoftmax_large_number_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_logsoftmax_large_number_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_logsoftmax_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_logsoftmax_negative_axis_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_loop11_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_loop13_seq_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_loop16_seq_none_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_lrn_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_lrn_default_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_lstm_batchwise_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_lstm_defaults_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_lstm_with_initial_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_lstm_with_peepholes_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_matmul_2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_matmul_3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_matmul_4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_matmulinteger_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_max_example_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_max_float16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_max_float32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_max_float64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_max_int16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_max_int32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_max_int64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_max_int8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_max_one_input_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_max_two_inputs_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_max_uint16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_max_uint32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_max_uint64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_max_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxpool_1d_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxpool_2d_ceil_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxpool_2d_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxpool_2d_dilations_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxpool_2d_pads_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxpool_2d_precomputed_pads_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxpool_2d_precomputed_same_upper_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxpool_2d_precomputed_strides_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxpool_2d_same_lower_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxpool_2d_same_upper_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxpool_2d_strides_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxpool_2d_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_maxpool_3d_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxpool_with_argmax_2d_precomputed_pads_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxpool_with_argmax_2d_precomputed_strides_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_maxunpool_export_with_output_shape_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_maxunpool_export_without_output_shape_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_mean_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mean_one_input_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mean_two_inputs_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_min_example_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_min_float16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_min_float32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_min_float64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_min_int16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_min_int32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_min_int64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_min_int8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_min_one_input_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_min_two_inputs_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_min_uint16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_min_uint32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_min_uint64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_min_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mod_broadcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mod_int64_fmod_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_mod_mixed_sign_float16_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_mod_mixed_sign_float32_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_mod_mixed_sign_float64_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_mod_mixed_sign_int16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mod_mixed_sign_int32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mod_mixed_sign_int64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mod_mixed_sign_int8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mod_uint16_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mod_uint32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mod_uint64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mod_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_momentum_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_momentum_multiple_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_mul_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mul_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mul_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mul_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_mvn_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_mvn_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_neg_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nesterov_momentum_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nllloss_NC_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NC_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nllloss_NCd1_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1_mean_weight_negative_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nllloss_NCd1_mean_weight_negative_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1_weight_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nllloss_NCd1_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1_weight_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nllloss_NCd1_weight_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2_no_weight_reduction_mean_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nllloss_NCd1d2_no_weight_reduction_mean_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2_reduction_mean_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2_reduction_mean_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2_reduction_sum_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2_reduction_sum_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2_with_weight_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2_with_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2_with_weight_reduction_mean_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nllloss_NCd1d2_with_weight_reduction_mean_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2_with_weight_reduction_sum_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2_with_weight_reduction_sum_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2_with_weight_reduction_sum_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2_with_weight_reduction_sum_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2d3_none_no_weight_negative_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2d3_none_no_weight_negative_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2d3_sum_weight_high_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2d3_sum_weight_high_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2d3d4d5_mean_weight_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nllloss_NCd1d2d3d4d5_mean_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2d3d4d5_none_no_weight_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nllloss_NCd1d2d3d4d5_none_no_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_nonmaxsuppression_center_point_box_format_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nonmaxsuppression_flipped_coordinates_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nonmaxsuppression_identical_boxes_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nonmaxsuppression_limit_output_size_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nonmaxsuppression_single_box_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nonmaxsuppression_suppress_by_IOU_and_scores_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nonmaxsuppression_suppress_by_IOU_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nonmaxsuppression_two_batches_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nonmaxsuppression_two_classes_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_nonzero_example_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_not_2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_not_3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_not_4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_onehot_negative_indices_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_onehot_with_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_onehot_with_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_onehot_without_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_optional_get_element_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_optional_get_element_sequence_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_optional_has_element_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_optional_has_element_empty_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_or2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_or3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_or4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_or_bcast3v1d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_or_bcast3v2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_or_bcast4v2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_or_bcast4v3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_or_bcast4v4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_pow_bcast_array_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_pow_bcast_scalar_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_pow_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_pow_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_pow_types_float32_int32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_pow_types_float32_int64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_pow_types_float32_uint32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_pow_types_float32_uint64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_pow_types_float_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_pow_types_int32_float32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_pow_types_int32_int32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_pow_types_int64_float32_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_pow_types_int64_int64_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_pow_types_int_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_prelu_broadcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_prelu_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_qlinearconv_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_qlinearmatmul_2D_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_qlinearmatmul_3D_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_quantizelinear_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_quantizelinear_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_range_float_type_positive_delta_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_range_float_type_positive_delta_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_range_int32_type_negative_delta_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_range_int32_type_negative_delta_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_reciprocal_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reciprocal_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l1_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l1_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l1_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l1_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l1_keep_dims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l1_keep_dims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l1_negative_axes_keep_dims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l1_negative_axes_keep_dims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l2_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l2_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l2_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l2_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l2_keep_dims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l2_keep_dims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l2_negative_axes_keep_dims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_l2_negative_axes_keep_dims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_log_sum_asc_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_log_sum_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_log_sum_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_log_sum_desc_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_log_sum_exp_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_log_sum_exp_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_log_sum_exp_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_log_sum_exp_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_log_sum_exp_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_log_sum_exp_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_log_sum_exp_negative_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_log_sum_exp_negative_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_log_sum_negative_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_max_default_axes_keepdim_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_max_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_max_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_max_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_max_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_max_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_max_negative_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_max_negative_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_mean_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_mean_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_mean_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_mean_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_mean_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_mean_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_mean_negative_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_mean_negative_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_min_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_min_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_min_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_min_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_min_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_min_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_min_negative_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_min_negative_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_prod_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_prod_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_prod_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_prod_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_prod_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_prod_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_prod_negative_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_prod_negative_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... /var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_reduce_sum.py:75: DeprecationWarning: The truth value of an empty array is ambiguous. Returning False, but in future this will result in an error. Use `array.size > 0` to check that an array is not empty.
return (numpy.sum(data, axis=axes if axes else None,
ok
test_reduce_sum_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_empty_axes_input_noop_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_empty_axes_input_noop_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_negative_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_negative_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_square_default_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_square_default_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_square_do_not_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_square_do_not_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_square_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_square_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_square_negative_axes_keepdims_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reduce_sum_square_negative_axes_keepdims_random_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reflect_pad_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_relu_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reshape_allowzero_reordered_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_reshape_extended_dims_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reshape_negative_dim_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reshape_negative_extended_dims_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reshape_one_dim_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reshape_reduced_dims_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reshape_reordered_all_dims_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reshape_reordered_last_dims_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reshape_zero_and_negative_dim_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_reshape_zero_dim_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_downsample_scales_cubic_align_corners_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_downsample_scales_cubic_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_downsample_scales_linear_align_corners_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_downsample_scales_linear_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_downsample_scales_nearest_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_downsample_sizes_cubic_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_downsample_sizes_nearest_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_downsample_sizes_nearest_tf_half_pixel_for_nn_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_tf_crop_and_resize_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_upsample_scales_cubic_align_corners_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_upsample_scales_cubic_asymmetric_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_upsample_scales_cubic_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_upsample_scales_linear_align_corners_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_upsample_scales_linear_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_upsample_scales_nearest_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_upsample_sizes_cubic_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_upsample_sizes_nearest_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_upsample_sizes_nearest_floor_align_corners_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_reversesequence_batch_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_reversesequence_time_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_rnn_seq_length_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_roialign_aligned_false_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_roialign_aligned_true_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_round_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_scan9_sum_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_scan_sum_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_scatter_elements_with_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_scatter_elements_with_duplicate_indices_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_scatter_elements_with_negative_indices_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_scatter_elements_without_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_scatter_with_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_scatter_without_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_scatternd_add_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_scatternd_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_scatternd_multiply_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_NCd1_mean_weight_negative_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1_mean_weight_negative_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_NCd1_mean_weight_negative_ii_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1_mean_weight_negative_ii_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_NCd1d2d3_none_no_weight_negative_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1d2d3_none_no_weight_negative_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1d2d3_none_no_weight_negative_ii_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1d2d3_sum_weight_high_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1d2d3_sum_weight_high_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1d2d3_sum_weight_high_ii_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1d2d3_sum_weight_high_ii_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1d2d3d4d5_mean_weight_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1d2d3d4d5_mean_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_NCd1d2d3d4d5_mean_weight_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1d2d3d4d5_mean_weight_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_NCd1d2d3d4d5_none_no_weight_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1d2d3d4d5_none_no_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_NCd1d2d3d4d5_none_no_weight_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_3d_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_3d_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_3d_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_no_weight_ii_3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_no_weight_ii_3d_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_mean_no_weight_ii_3d_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_no_weight_ii_3d_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_mean_no_weight_ii_4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_no_weight_ii_4d_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_mean_no_weight_ii_4d_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_no_weight_ii_4d_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_mean_no_weight_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_no_weight_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_mean_no_weight_ii_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_no_weight_ii_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_mean_weight_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_mean_weight_ii_3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_weight_ii_3d_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_mean_weight_ii_3d_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_weight_ii_3d_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_mean_weight_ii_4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_weight_ii_4d_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_mean_weight_ii_4d_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_weight_ii_4d_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_mean_weight_ii_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_weight_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_mean_weight_ii_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_weight_ii_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_mean_weight_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_mean_weight_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sce_none_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_none_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_none_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_none_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_none_weights_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_none_weights_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_none_weights_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_none_weights_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_sum_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_sum_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_sum_log_prob_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sce_sum_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_selu_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_selu_default_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_selu_example_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_sequence_insert_at_back_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sequence_insert_at_front_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_shape_clip_end_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_shape_clip_start_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_shape_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_shape_end_1_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_shape_end_negative_1_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_shape_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_shape_start_1_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_shape_start_1_end_2_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_shape_start_1_end_negative_1_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_shape_start_negative_1_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_shrink_hard_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_shrink_soft_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sigmoid_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sigmoid_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sign_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_simple_rnn_batchwise_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_simple_rnn_defaults_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_simple_rnn_with_initial_bias_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_sin_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sin_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sinh_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sinh_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_size_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_size_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_slice_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_slice_default_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_slice_default_steps_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_slice_end_out_of_bounds_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_slice_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_slice_neg_steps_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_slice_negative_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_slice_start_out_of_bounds_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_softmax_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_softmax_axis_0_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_softmax_axis_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_softmax_axis_1_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_softmax_axis_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_softmax_axis_2_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_softmax_default_axis_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_softmax_default_axis_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_softmax_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_softmax_example_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_softmax_large_number_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_softmax_large_number_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_softmax_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_softmax_negative_axis_expanded_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_softplus_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_softplus_example_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_softsign_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_softsign_example_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_spacetodepth_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_spacetodepth_example_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_split_equal_parts_1d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_split_equal_parts_2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_split_equal_parts_default_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_split_variable_parts_1d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_split_variable_parts_2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_split_variable_parts_default_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_split_zero_size_splits_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sqrt_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sqrt_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_squeeze_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_squeeze_negative_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_strnormalizer_export_monday_casesensintive_lower_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_strnormalizer_export_monday_casesensintive_nochangecase_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_strnormalizer_export_monday_casesensintive_upper_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_strnormalizer_export_monday_empty_output_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_strnormalizer_export_monday_insensintive_upper_twodim_cpu (__main__.OnnxBackendNodeModelTest) ... FAIL
test_strnormalizer_nostopwords_nochangecase_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sub_bcast_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sub_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sub_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sub_uint8_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sum_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sum_one_input_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_sum_two_inputs_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tan_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tan_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tanh_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tanh_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tfidfvectorizer_tf_batch_onlybigrams_skip0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tfidfvectorizer_tf_batch_onlybigrams_skip5_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tfidfvectorizer_tf_batch_uniandbigrams_skip5_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tfidfvectorizer_tf_only_bigrams_skip0_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_tfidfvectorizer_tf_onlybigrams_levelempty_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_tfidfvectorizer_tf_onlybigrams_skip5_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_tfidfvectorizer_tf_uniandbigrams_skip5_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_thresholdedrelu_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_thresholdedrelu_default_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_thresholdedrelu_example_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_tile_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_tile_precomputed_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_top_k_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_top_k_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_top_k_smallest_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_training_dropout_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_training_dropout_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_training_dropout_default_mask_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_training_dropout_mask_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_training_dropout_zero_ratio_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_training_dropout_zero_ratio_mask_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_transpose_all_permutations_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_transpose_all_permutations_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_transpose_all_permutations_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_transpose_all_permutations_3_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_transpose_all_permutations_4_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_transpose_all_permutations_5_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_transpose_default_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tril_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tril_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tril_one_row_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tril_out_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tril_out_pos_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tril_pos_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tril_square_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tril_square_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_tril_zero_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_triu_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_triu_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_triu_one_row_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_triu_out_neg_out_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_triu_out_pos_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_triu_pos_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_triu_square_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_triu_square_neg_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_triu_zero_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_unique_not_sorted_without_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_unique_sorted_with_axis_3d_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_unique_sorted_with_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_unique_sorted_with_negative_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_unique_sorted_without_axis_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_unsqueeze_axis_0_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_unsqueeze_axis_1_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_unsqueeze_axis_2_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_unsqueeze_axis_3_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_unsqueeze_negative_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_unsqueeze_three_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_unsqueeze_two_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_unsqueeze_unsorted_axes_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_upsample_nearest_cpu (__main__.OnnxBackendNodeModelTest) ... ERROR
test_where_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_where_long_example_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_xor2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_xor3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_xor4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_xor_bcast3v1d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_xor_bcast3v2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_xor_bcast4v2d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_xor_bcast4v3d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_xor_bcast4v4d_cpu (__main__.OnnxBackendNodeModelTest) ... ok
test_AvgPool1d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_AvgPool1d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_AvgPool2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_AvgPool2d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_AvgPool3d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_AvgPool3d_stride1_pad0_gpu_input_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_AvgPool3d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_BatchNorm1d_3d_input_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_BatchNorm2d_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_BatchNorm2d_momentum_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_BatchNorm3d_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_BatchNorm3d_momentum_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_ConstantPad2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv1d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv1d_dilated_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv1d_groups_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv1d_pad1_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv1d_pad1size1_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv1d_pad2_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv1d_pad2size1_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv1d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv2d_depthwise_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv2d_depthwise_padded_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv2d_depthwise_strided_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv2d_depthwise_with_multiplier_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv2d_dilated_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv2d_groups_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv2d_groups_thnn_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv2d_no_bias_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv2d_padding_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv2d_strided_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv3d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv3d_dilated_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv3d_dilated_strided_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv3d_groups_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv3d_no_bias_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv3d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Conv3d_stride_padding_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_ConvTranspose2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_ConvTranspose2d_no_bias_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_ELU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Embedding_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Embedding_sparse_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_GLU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_GLU_dim_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_LeakyReLU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_LeakyReLU_with_negval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Linear_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Linear_no_bias_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_LogSoftmax_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_MaxPool1d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_MaxPool1d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_MaxPool1d_stride_padding_dilation_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
test_MaxPool2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_MaxPool2d_stride_padding_dilation_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ok
test_MaxPool3d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_MaxPool3d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_MaxPool3d_stride_padding_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_PReLU_1d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_PReLU_1d_multiparam_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_PReLU_2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_PReLU_2d_multiparam_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_PReLU_3d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_PReLU_3d_multiparam_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_PixelShuffle_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_PoissonNLLLLoss_no_reduce_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_ReLU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_ReflectionPad2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_ReplicationPad2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_SELU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Sigmoid_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Softmax_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Softmin_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Softplus_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Softsign_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_Tanh_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_ZeroPad2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_log_softmax_dim3_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_log_softmax_lastdim_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_softmax_functional_dim3_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_softmax_lastdim_cpu (__main__.OnnxBackendPyTorchConvertedModelTest) ... ERROR
test_operator_add_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_add_size1_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_add_size1_right_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_add_size1_singleton_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_addconstant_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_addmm_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_basic_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_chunk_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_clip_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_concat2_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_conv_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_convtranspose_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_exp_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_flatten_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_index_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_max_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_maxpool_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_min_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_mm_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_non_float_params_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_pad_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_params_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_permute2_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_pow_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_reduced_mean_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_reduced_mean_keepdim_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_reduced_sum_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_reduced_sum_keepdim_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_repeat_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_repeat_dim_overflow_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_selu_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_sqrt_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_symbolic_override_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_symbolic_override_nested_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_operator_view_cpu (__main__.OnnxBackendPyTorchOperatorModelTest) ... ERROR
test_bvlc_alexnet_cpu (__main__.OnnxBackendRealModelTest) ... ERROR
test_densenet121_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_densenet121_.*"'
test_densenet121_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_densenet121_.*"'
test_inception_v1_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_inception_.*"'
test_inception_v1_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_inception_.*"'
test_inception_v2_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_inception_.*"'
test_inception_v2_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_inception_.*"'
test_resnet50_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_resnet50_.*"'
test_resnet50_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_resnet50_.*"'
test_shufflenet_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_shufflenet_.*"'
test_shufflenet_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_shufflenet_.*"'
test_squeezenet_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_squeezenet_.*"'
test_squeezenet_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_squeezenet_.*"'
test_vgg19_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_vgg19_.*"'
test_vgg19_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_vgg19_.*"'
test_zfnet512_cpu (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_zfnet512_.*"'
test_zfnet512_cuda (__main__.OnnxBackendRealModelTest) ... skipped 'matched exclude pattern ".*_zfnet512_.*"'
test_expand_shape_model1_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
test_expand_shape_model2_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
test_expand_shape_model3_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
test_expand_shape_model4_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
test_gradient_of_add_and_mul_cpu (__main__.OnnxBackendSimpleModelTest) ... ERROR
test_gradient_of_add_cpu (__main__.OnnxBackendSimpleModelTest) ... ERROR
test_sequence_model1_cpu (__main__.OnnxBackendSimpleModelTest) ... ERROR
test_sequence_model2_cpu (__main__.OnnxBackendSimpleModelTest) ... ERROR
test_sequence_model3_cpu (__main__.OnnxBackendSimpleModelTest) ... ERROR
test_sequence_model4_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
test_sequence_model5_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
test_sequence_model6_cpu (__main__.OnnxBackendSimpleModelTest) ... ERROR
test_sequence_model7_cpu (__main__.OnnxBackendSimpleModelTest) ... ERROR
test_sequence_model8_cpu (__main__.OnnxBackendSimpleModelTest) ... ERROR
test_shrink_cpu (__main__.OnnxBackendSimpleModelTest) ... ERROR
test_sign_model_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
test_single_relu_model_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
test_strnorm_model_monday_casesensintive_lower_cpu (__main__.OnnxBackendSimpleModelTest) ... FAIL
test_strnorm_model_monday_casesensintive_nochangecase_cpu (__main__.OnnxBackendSimpleModelTest) ... FAIL
test_strnorm_model_monday_casesensintive_upper_cpu (__main__.OnnxBackendSimpleModelTest) ... FAIL
test_strnorm_model_monday_empty_output_cpu (__main__.OnnxBackendSimpleModelTest) ... FAIL
test_strnorm_model_monday_insensintive_upper_twodim_cpu (__main__.OnnxBackendSimpleModelTest) ... FAIL
test_strnorm_model_nostopwords_nochangecase_cpu (__main__.OnnxBackendSimpleModelTest) ... ok
======================================================================
ERROR: test_adagrad_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Adagrad' from domain 'ai.onnx.preview.training' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
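Most of the ERROR lines above follow this pattern: the python_compiled runtime raises MissingOperatorError because it has no kernel for a node, here Adagrad from the ai.onnx.preview.training domain, and then prints the operators it does implement. For reference, the update rule this operator would compute is short; the following is a minimal numpy sketch of the ONNX Adagrad update (the function name and defaults are illustrative, not part of mlprodict).

<<<
import numpy

def adagrad_update(r, t, x, g, h,
                   norm_coefficient=0.0, epsilon=1e-6, decay_factor=0.0):
    # Sketch of the ai.onnx.preview.training Adagrad update for one
    # tensor; illustrative only, not an actual runtime kernel.
    r_t = r / (1 + t * decay_factor)        # decayed learning rate
    g_reg = norm_coefficient * x + g        # l2-regularized gradient
    h_new = h + g_reg * g_reg               # accumulated squared gradients
    x_new = x - r_t * g_reg / (numpy.sqrt(h_new) + epsilon)
    return x_new, h_new
>>>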
======================================================================
ERROR: test_adagrad_multiple_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Adagrad' from domain 'ai.onnx.preview.training' has no runtime yet. Available list:
- (same list of available operators as printed above)
======================================================================
ERROR: test_adam_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Adam' from domain 'ai.onnx.preview.training' has no runtime yet. Available list:
- (same list of available operators as printed above)
======================================================================
ERROR: test_adam_multiple_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Adam' from domain 'ai.onnx.preview.training' has no runtime yet. Available list:
- (same list of available operators as printed above)
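The adagrad and adam failures all share this root cause. A quick way to anticipate them without running the whole suite is to scan a model for nodes from the preview training domain before calling prepare; the helper below is a hypothetical convenience written for this page, not part of mlprodict.

<<<
import onnx

def training_nodes(model):
    # List operators from the preview training domain, which the
    # python_compiled runtime does not implement (hypothetical helper).
    return [node.op_type for node in model.graph.node
            if node.domain == 'ai.onnx.preview.training']

# usage: ops = training_nodes(onnx.load("model.onnx"))
>>>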
======================================================================
ERROR: test_basic_convinteger_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'ConvInteger' from domain '' has no runtime yet. Available list:
- (same list of available operators as printed above)
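ConvInteger is a different case: it belongs to the default domain (quantized convolution, introduced in opset 10) and is simply not implemented either. Its semantics are to subtract optional zero points from the integer inputs and accumulate in int32. A 1-D, stride-1, no-padding sketch of that computation, assuming the simplest configuration:

<<<
import numpy

def conv_integer_1d(x, w, x_zero_point=0, w_zero_point=0):
    # 1-D sketch of ConvInteger: subtract zero points, cross-correlate,
    # accumulate in int32 (stride 1, no padding); illustrative only.
    x = x.astype(numpy.int32) - x_zero_point
    w = w.astype(numpy.int32) - w_zero_point
    n = x.shape[0] - w.shape[0] + 1
    return numpy.array([(x[i:i + w.shape[0]] * w).sum() for i in range(n)],
                       dtype=numpy.int32)
>>>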
======================================================================
ERROR: test_bernoulli_seed_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "_mt19937.pyx", line 178, in numpy.random._mt19937.MT19937._legacy_seeding
TypeError: 'float' object cannot be interpreted as an integer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 6, in compiled_run
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_random.py", line 85, in _run
state = self._get_state(self.seed)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_random.py", line 66, in _get_state
state = numpy.random.RandomState(seed=self.seed)
File "mtrand.pyx", line 185, in numpy.random.mtrand.RandomState.__init__
File "_mt19937.pyx", line 166, in numpy.random._mt19937.MT19937._legacy_seeding
File "_mt19937.pyx", line 186, in numpy.random._mt19937.MT19937._legacy_seeding
TypeError: Cannot cast scalar from dtype('float64') to dtype('int64') according to the rule 'safe'
======================================================================
ERROR: test_bernoulli_seed_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "_mt19937.pyx", line 178, in numpy.random._mt19937.MT19937._legacy_seeding
TypeError: 'float' object cannot be interpreted as an integer
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 6, in compiled_run
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_random.py", line 155, in _run
state = self._get_state(self.seed)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_random.py", line 66, in _get_state
state = numpy.random.RandomState(seed=self.seed)
File "mtrand.pyx", line 185, in numpy.random.mtrand.RandomState.__init__
File "_mt19937.pyx", line 166, in numpy.random._mt19937.MT19937._legacy_seeding
File "_mt19937.pyx", line 186, in numpy.random._mt19937.MT19937._legacy_seeding
TypeError: Cannot cast scalar from dtype('float64') to dtype('int64') according to the rule 'safe'
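Both bernoulli_seed failures have the same cause: the seed attribute is stored as a float in the ONNX model and handed unchanged to numpy.random.RandomState, whose legacy seeding only accepts integers. The snippet below reproduces the error from the traceback and shows the obvious cast, assuming the seed value is integral:

<<<
import numpy

seed = 0.0                                # ONNX stores the seed as a float
# numpy.random.RandomState(seed=seed)    # raises TypeError, as in the log
state = numpy.random.RandomState(seed=int(seed))  # the cast fixes seeding
>>>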
======================================================================
ERROR: test_cast_BFLOAT16_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
self.graph_ = self.to_sequence(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
variables[obj.name] = _var_as_dict(obj)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 362, in _var_as_dict
elem_type = _elem_type_as_str(t.elem_type)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 292, in _elem_type_as_str
raise NotImplementedError( # pragma: no cover
NotImplementedError: elem_type '16' is unknown
fields:
['__abs__', '__add__', ..., 'real', 'to_bytes']  (full dir(int) listing omitted)
-----
<class 'int'>.
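elem_type '16' in this traceback is bfloat16: _elem_type_as_str has no mapping for it, so any model with a bfloat16 input or output fails before execution even starts. The constant can be checked directly against onnx:

<<<
from onnx import TensorProto

print(TensorProto.BFLOAT16)            # 16
print(TensorProto.DataType.Name(16))   # 'BFLOAT16'
>>>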
======================================================================
ERROR: test_cast_FLOAT_to_BFLOAT16_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
self.graph_ = self.to_sequence(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 605, in to_sequence
outputs[obj.name] = _var_as_dict(obj)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 362, in _var_as_dict
elem_type = _elem_type_as_str(t.elem_type)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 292, in _elem_type_as_str
raise NotImplementedError( # pragma: no cover
NotImplementedError: elem_type '16' is unknown
fields:
['__abs__', '__add__', ..., 'real', 'to_bytes']  (full dir(int) listing omitted)
-----
<class 'int'>.
======================================================================
ERROR: test_castlike_BFLOAT16_to_FLOAT_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
self.graph_ = self.to_sequence(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
variables[obj.name] = _var_as_dict(obj)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 362, in _var_as_dict
elem_type = _elem_type_as_str(t.elem_type)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 292, in _elem_type_as_str
raise NotImplementedError( # pragma: no cover
NotImplementedError: elem_type '16' is unknown
fields:
[... same fields list as above ...]
-----
<class 'int'>.
======================================================================
ERROR: test_castlike_BFLOAT16_to_FLOAT_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
self.graph_ = self.to_sequence(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
variables[obj.name] = _var_as_dict(obj)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 362, in _var_as_dict
elem_type = _elem_type_as_str(t.elem_type)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 292, in _elem_type_as_str
raise NotImplementedError( # pragma: no cover
NotImplementedError: elem_type '16' is unknown
fields:
[... same fields list as above ...]
-----
<class 'int'>.
======================================================================
ERROR: test_castlike_FLOAT_to_BFLOAT16_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
self.graph_ = self.to_sequence(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
variables[obj.name] = _var_as_dict(obj)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 362, in _var_as_dict
elem_type = _elem_type_as_str(t.elem_type)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 292, in _elem_type_as_str
raise NotImplementedError( # pragma: no cover
NotImplementedError: elem_type '16' is unknown
fields:
[... same fields list as above ...]
-----
<class 'int'>.
======================================================================
ERROR: test_castlike_FLOAT_to_BFLOAT16_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
self.graph_ = self.to_sequence(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
variables[obj.name] = _var_as_dict(obj)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 362, in _var_as_dict
elem_type = _elem_type_as_str(t.elem_type)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 292, in _elem_type_as_str
raise NotImplementedError( # pragma: no cover
NotImplementedError: elem_type '16' is unknown
fields:
[... same fields list as above ...]
-----
<class 'int'>.
======================================================================
ERROR: test_clip_default_inbounds_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
(y, ) = n0_clip_11(x, , )
^
SyntaxError: invalid syntax
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
x = dict_inputs['x']
(y, ) = n0_clip_11(x, , )
return {
'y': y,
}
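Note: this clip failure and the similar ones that follow come from the generated code, not from the Clip kernel itself. Clip-11 takes optional min and max inputs; when min is absent the generator still emits an empty slot, producing the invalid call n0_clip_11(x, , ) (or n0_clip_11(x, , max) when only max is given). A hedged sketch of one possible fix, assuming the generator joins input names and marks an absent optional input with an empty string:
<<<
# hypothetical input list: '' marks the absent optional 'min' input
inputs = ['x', '', 'max']
# substituting None keeps the positional slot and yields valid Python
call = "(y, ) = n0_clip_11(%s)" % ", ".join(n if n else "None" for n in inputs)
print(call)  # (y, ) = n0_clip_11(x, None, max)
>>>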
======================================================================
ERROR: test_clip_default_int8_inbounds_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
(y, ) = n0_clip_11(x, , )
^
SyntaxError: invalid syntax
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
x = dict_inputs['x']
(y, ) = n0_clip_11(x, , )
return {
'y': y,
}
======================================================================
ERROR: test_clip_default_int8_max_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
(y, ) = n0_clip_11(x, , max)
^
SyntaxError: invalid syntax
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
x = dict_inputs['x']
max = dict_inputs['max']
(y, ) = n0_clip_11(x, , max)
return {
'y': y,
}
======================================================================
ERROR: test_clip_default_max_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
(y, ) = n0_clip_11(x, , max)
^
SyntaxError: invalid syntax
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
x = dict_inputs['x']
max = dict_inputs['max']
(y, ) = n0_clip_11(x, , max)
return {
'y': y,
}
======================================================================
ERROR: test_convinteger_with_padding_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'ConvInteger' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
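Note: MissingOperatorError means the python_compiled runtime has no implementation registered for that node type; ConvInteger here, and DepthToSpace and GatherND in the failures further down, are absent from the list above. A hedged sketch for checking a model against such a list before preparing it, using only the official onnx API (the path and the abbreviated set are placeholders):
<<<
import onnx

# abbreviated placeholder for the supported-operator list printed above
supported = {"Abs", "Add", "Cast", "Clip", "Conv", "MatMul", "Relu"}

model = onnx.load("model.onnx")  # hypothetical model path
missing = {node.op_type for node in model.graph.node} - supported
print("operators without a runtime:", sorted(missing))
>>>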
======================================================================
ERROR: test_convinteger_without_padding_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'ConvInteger' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_depthtospace_crd_mode_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'DepthToSpace' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_depthtospace_crd_mode_example_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'DepthToSpace' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_depthtospace_dcr_mode_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'DepthToSpace' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_depthtospace_example_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'DepthToSpace' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_dynamicquantizelinear_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1755, in _build_compile_run
raise NotImplementedError(
NotImplementedError: Not implemented for models including functions.
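Note: the dynamicquantizelinear failures are a different kind of gap. The error indicates these test models embed local function definitions (ModelProto.functions), which _build_compile_run does not support. A hedged sketch to detect such models before preparing them, assuming only the official onnx API:
<<<
import onnx

model = onnx.load("model.onnx")  # hypothetical model path
if len(model.functions) > 0:
    # python_compiled raises NotImplementedError on these models;
    # a different runtime would be needed here
    print("model defines %d local functions" % len(model.functions))
>>>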
======================================================================
ERROR: test_dynamicquantizelinear_max_adjusted_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1755, in _build_compile_run
raise NotImplementedError(
NotImplementedError: Not implemented for models including functions.
======================================================================
ERROR: test_dynamicquantizelinear_min_adjusted_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1755, in _build_compile_run
raise NotImplementedError(
NotImplementedError: Not implemented for models including functions.
======================================================================
ERROR: test_gathernd_example_float32_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GatherND' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_gathernd_example_int32_batch_dim1_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GatherND' from domain '' has no runtime yet. Available list: (identical to the list above)
======================================================================
ERROR: test_gathernd_example_int32_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GatherND' from domain '' has no runtime yet. Available list: (identical to the list above)
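The two GatherND failures above come from a missing runtime rather than a wrong result. For reference, the operator itself is small; here is a minimal NumPy sketch of ONNX GatherND semantics (the name gather_nd and the code are illustrative, not mlprodict's implementation):
<<<
import numpy as np

def gather_nd(data, indices, batch_dims=0):
    # Minimal sketch of ONNX GatherND: the last axis of `indices` holds
    # coordinate tuples into `data`, after `batch_dims` shared batch axes.
    b, m = batch_dims, indices.shape[-1]
    batch_shape = data.shape[:b]
    n = int(np.prod(batch_shape, dtype=np.int64)) if b else 1
    d = data.reshape((n,) + data.shape[b:])
    i = indices.reshape((n,) + indices.shape[b:])
    # For each flattened batch element, use the last axis of `indices`
    # as a tuple of integer index arrays into `data`.
    out = np.stack([d[k][tuple(np.moveaxis(i[k], -1, 0))]
                    for k in range(n)])
    return out.reshape(batch_shape + indices.shape[b:-1] + data.shape[b + m:])

# int32 example from the ONNX GatherND specification: expected [0, 3]
print(gather_nd(np.array([[0, 1], [2, 3]]), np.array([[0, 0], [1, 1]])))
>>>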
======================================================================
ERROR: test_globalmaxpool_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GlobalMaxPool' from domain '' has no runtime yet. Available list: (identical to the list above)
======================================================================
ERROR: test_globalmaxpool_precomputed_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GlobalMaxPool' from domain '' has no runtime yet. Available list: (identical to the list above)
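GlobalMaxPool is one of the simplest missing kernels: it reduces every spatial axis with max while keeping N and C. A hypothetical one-line NumPy version (illustrative, not the mlprodict API):
<<<
import numpy as np

def global_max_pool(x):
    # ONNX GlobalMaxPool: max over every axis after N and C,
    # output shape (N, C, 1, ..., 1).
    return np.max(x, axis=tuple(range(2, x.ndim)), keepdims=True)

x = np.random.randn(2, 3, 4, 5).astype(np.float32)
print(global_max_pool(x).shape)  # (2, 3, 1, 1)
>>>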
======================================================================
ERROR: test_gridsample_aligncorners_true_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GridSample' from domain '' has no runtime yet. Available list: (identical to the list above)
======================================================================
ERROR: test_gridsample_bicubic_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GridSample' from domain '' has no runtime yet. Available list: (identical to the list above)
======================================================================
ERROR: test_gridsample_bilinear_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GridSample' from domain '' has no runtime yet. Available list: (identical to the list above)
======================================================================
ERROR: test_gridsample_border_padding_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GridSample' from domain '' has no runtime yet. Available list: (identical to the list above)
======================================================================
ERROR: test_gridsample_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GridSample' from domain '' has no runtime yet. Available list: (identical to the list above)
======================================================================
ERROR: test_gridsample_nearest_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GridSample' from domain '' has no runtime yet. Available list: (identical to the list above)
======================================================================
ERROR: test_gridsample_reflection_padding_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GridSample' from domain '' has no runtime yet. Available list: (identical to the list above)
======================================================================
ERROR: test_gridsample_zeros_padding_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GridSample' from domain '' has no runtime yet. Available list: (identical to the list above)
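All nine GridSample variants fail for the same reason. The operator resamples an input at normalized grid coordinates in [-1, 1]; the sketch below is a naive loop for mode='nearest' with padding_mode='zeros', meant only to show the coordinate mapping (names and the rounding detail are illustrative assumptions):
<<<
import numpy as np

def grid_sample_nearest(X, grid, align_corners=False):
    # Sketch of ONNX GridSample, mode='nearest', padding_mode='zeros'.
    # X: (N, C, H, W); grid: (N, H_out, W_out, 2), x (width) first.
    N, C, H, W = X.shape
    _, H_out, W_out, _ = grid.shape
    out = np.zeros((N, C, H_out, W_out), dtype=X.dtype)
    for n in range(N):
        for i in range(H_out):
            for j in range(W_out):
                gx, gy = grid[n, i, j]
                if align_corners:  # [-1, 1] maps onto pixel centers
                    fx, fy = (gx + 1) / 2 * (W - 1), (gy + 1) / 2 * (H - 1)
                else:              # [-1, 1] maps onto the full extent
                    fx, fy = ((gx + 1) * W - 1) / 2, ((gy + 1) * H - 1) / 2
                x, y = int(np.rint(fx)), int(np.rint(fy))
                if 0 <= x < W and 0 <= y < H:
                    out[n, :, i, j] = X[n, :, y, x]
                # out-of-range samples stay 0 (padding_mode='zeros')
    return out
>>>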
======================================================================
ERROR: test_gru_batchwise_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GRU' from domain '' has no runtime yet. Available list: (identical to the list above)
======================================================================
ERROR: test_gru_defaults_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GRU' from domain '' has no runtime yet. Available list: (identical to the list above)
======================================================================
ERROR: test_gru_seq_length_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GRU' from domain '' has no runtime yet. Available list: (identical to the list above)
======================================================================
ERROR: test_gru_with_initial_bias_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last): ... (identical to the traceback above)
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'GRU' from domain '' has no runtime yet. Available list: (identical to the list above)
======================================================================
ERROR: test_hardswish_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1755, in _build_compile_run
raise NotImplementedError(
NotImplementedError: Not implemented for models including functions.
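This failure is of a different kind: the model embeds local ONNX functions and _build_compile_run rejects it outright. A minimal sketch of a fallback, assuming the non-compiled 'python' runtime accepts such models; only the exception type comes from the traceback above:
<<<
# A minimal sketch, assuming runtime='python' handles models embedding
# FunctionProto, which _build_compile_run refuses.
from mlprodict.onnxrt import OnnxInference

def create_session(onnx_model):
    try:
        return OnnxInference(onnx_model, runtime='python_compiled')
    except NotImplementedError:
        # fall back to the interpreted runtime
        return OnnxInference(onnx_model, runtime='python')
>>>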
======================================================================
ERROR: test_identity_opt_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
self.graph_ = self.to_sequence(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
variables[obj.name] = _var_as_dict(obj)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 419, in _var_as_dict
dtype['optional'] = _var_as_dict(optional)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
raise NotImplementedError( # pragma: no cover
NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.Optional'> value is 'elem_type {\n sequence_type {\n elem_type {\n tensor_type {\n elem_type: 1\n shape {\n dim {\n dim_value: 5\n }\n }\n }\n }\n }\n}\n'.
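The value dumped in the message is an Optional type proto wrapping a sequence of tensors, a construct _var_as_dict cannot convert yet. A sketch rebuilding that type with onnx.helper shows the structure involved:
<<<
# A sketch rebuilding the type proto shown in the error message:
# optional(sequence(tensor(float32, [5]))), as in test_identity_opt_cpu.
from onnx import helper, TensorProto

tensor_t = helper.make_tensor_type_proto(TensorProto.FLOAT, shape=[5])
seq_t = helper.make_sequence_type_proto(tensor_t)
opt_t = helper.make_optional_type_proto(seq_t)
print(opt_t)  # same 'elem_type { sequence_type { ... } }' structure
>>>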
======================================================================
ERROR: test_if_opt_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
self.graph_ = self.to_sequence(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 605, in to_sequence
outputs[obj.name] = _var_as_dict(obj)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 419, in _var_as_dict
dtype['optional'] = _var_as_dict(optional)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
raise NotImplementedError( # pragma: no cover
NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.Optional'> value is 'elem_type {\n sequence_type {\n elem_type {\n tensor_type {\n elem_type: 1\n shape {\n dim {\n dim_value: 5\n }\n }\n }\n }\n }\n}\n'.
======================================================================
ERROR: test_instancenorm_epsilon_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
[frames identical to the GRU failure above]
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'InstanceNormalization' from domain '' has no runtime yet. Available list: [identical to the list above, omitted]
======================================================================
ERROR: test_instancenorm_example_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
[frames identical to the GRU failure above]
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'InstanceNormalization' from domain '' has no runtime yet. Available list: [identical to the list above, omitted]
======================================================================
ERROR: test_loop16_seq_none_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
[frames identical to test_identity_opt_cpu above]
NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.Optional'> value is 'elem_type {\n sequence_type {\n elem_type {\n tensor_type {\n elem_type: 1\n shape {\n }\n }\n }\n }\n}\n'.
======================================================================
ERROR: test_lrn_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
[frames identical to the GRU failure above]
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'LRN' from domain '' has no runtime yet. Available list: [identical to the list above, omitted]
======================================================================
ERROR: test_lrn_default_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
[frames identical to the GRU failure above]
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'LRN' from domain '' has no runtime yet. Available list: [identical to the list above, omitted]
======================================================================
ERROR: test_lstm_batchwise_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
[frames identical to the GRU failure above]
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'LSTM' from domain '' has no runtime yet. Available list: [identical to the list above, omitted]
======================================================================
ERROR: test_lstm_defaults_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
[frames identical to the GRU failure above]
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'LSTM' from domain '' has no runtime yet. Available list: [identical to the list above, omitted]
======================================================================
ERROR: test_lstm_with_initial_bias_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
[frames identical to the GRU failure above]
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'LSTM' from domain '' has no runtime yet. Available list: [identical to the list above, omitted]
======================================================================
ERROR: test_lstm_with_peepholes_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
[frames identical to the GRU failure above]
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'LSTM' from domain '' has no runtime yet. Available list: [identical to the list above, omitted]
======================================================================
ERROR: test_matmulinteger_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
[frames identical to the GRU failure above]
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'MatMulInteger' from domain '' has no runtime yet. Available list: [identical to the list above, omitted]
======================================================================
ERROR: test_max_example_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 8, in compiled_run
TypeError: _run() takes 3 positional arguments but 4 were given
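Max is variadic, but the function generated by the compiled runtime binds a fixed two-argument _run signature, so three inputs overflow it (and a single input, in test_max_one_input_cpu below, underflows it). A sketch of the kind of model that triggers the mismatch, built with onnx.helper:
<<<
# A sketch of the failing case: a variadic Max node with three inputs.
from onnx import helper, TensorProto

node = helper.make_node('Max', ['a', 'b', 'c'], ['y'])
graph = helper.make_graph(
    [node], 'max3',
    [helper.make_tensor_value_info(n, TensorProto.FLOAT, [3])
     for n in 'abc'],
    [helper.make_tensor_value_info('y', TensorProto.FLOAT, [3])])
model = helper.make_model(graph)
# OnnxInference(model, runtime='python_compiled').run(feeds) raises
# TypeError: _run() takes 3 positional arguments but 4 were given
>>>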
======================================================================
ERROR: test_max_one_input_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 6, in compiled_run
TypeError: _run() missing 1 required positional argument: 'b'
======================================================================
ERROR: test_maxunpool_export_with_output_shape_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
[frames identical to the GRU failure above]
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'MaxUnpool' from domain '' has no runtime yet. Available list: [identical to the list above, omitted]
======================================================================
ERROR: test_maxunpool_export_without_output_shape_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
[frames identical to the GRU failure above]
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'MaxUnpool' from domain '' has no runtime yet. Available list: [identical to the list above, omitted]
======================================================================
ERROR: test_min_example_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
[identical to test_max_example_cpu above]
TypeError: _run() takes 3 positional arguments but 4 were given
======================================================================
ERROR: test_min_one_input_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 6, in compiled_run
TypeError: _run() missing 1 required positional argument: 'b'
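The two Min failures above are an arity mismatch rather than a missing kernel: the generated compiled_run forwards each node's inputs positionally to the operator's _run method, so a kernel with a fixed two-input signature breaks on nodes with one or three inputs (ONNX Min is variadic). A toy reproduction follows; the class is illustrative, not mlprodict's internals:
<<<
import numpy as np

class MinKernel:
    # Fixed two-input signature, while ONNX Min accepts 1..N inputs.
    def _run(self, a, b):
        return (np.minimum(a, b),)

kernel = MinKernel()
x = [np.array([1.0]), np.array([2.0]), np.array([0.5])]
try:
    kernel._run(*x)    # 3 inputs: takes 3 positional arguments but 4 were given
except TypeError as e:
    print(e)
try:
    kernel._run(x[0])  # 1 input: missing 1 required positional argument: 'b'
except TypeError as e:
    print(e)
>>>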
======================================================================
ERROR: test_momentum_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Momentum' from domain 'ai.onnx.preview.training' has no runtime yet. Available list:
- … (same list of available operators as above)
======================================================================
ERROR: test_momentum_multiple_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Momentum' from domain 'ai.onnx.preview.training' has no runtime yet. Available list:
- … (same list of available operators as above)
======================================================================
ERROR: test_mvn_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1755, in _build_compile_run
raise NotImplementedError(
NotImplementedError: Not implemented for models including functions.
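test_mvn exercises MeanVarianceNormalization, which onnx distributes as a model-local function, and _build_compile_run rejects any model carrying functions. A minimal guard sketch, assuming only the compiled runtime has this restriction and that the interpreted 'python' runtime can still run the model; make_session is an illustrative helper:
<<<
from mlprodict.onnxrt import OnnxInference

def make_session(model):
    # ModelProto.functions holds local FunctionProto definitions (IR >= 8).
    runtime = 'python' if len(model.functions) > 0 else 'python_compiled'
    return OnnxInference(model, runtime=runtime)
>>>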
======================================================================
ERROR: test_nesterov_momentum_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Momentum' from domain 'ai.onnx.preview.training' has no runtime yet. Available list:
- … (same list of available operators as above)
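The three momentum failures above share one cause: 'Momentum' belongs to the training domain ai.onnx.preview.training, which this runtime does not cover. For reference, here is a numpy sketch of the update the operator specifies in standard mode, transcribed from the ONNX specification rather than from mlprodict code:
<<<
import numpy as np

def momentum_update(r, t, x, g, v, alpha=0.9, beta=1.0, norm_coefficient=0.0):
    # Standard-mode Momentum (ai.onnx.preview.training):
    g_reg = norm_coefficient * x + g        # L2-regularized gradient
    beta_adj = beta if t > 0 else 1.0       # beta is forced to 1 at step 0
    v_new = alpha * v + beta_adj * g_reg    # momentum accumulator
    x_new = x - r * v_new                   # parameter step
    return x_new, v_new
>>>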
======================================================================
ERROR: test_nllloss_NCd1_ii_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 7, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_nllloss_NCd1_mean_weight_negative_ii_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 8, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_nllloss_NCd1_weight_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 8, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_nllloss_NCd1_weight_ii_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 8, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_nllloss_NCd1d2_no_weight_reduction_mean_ii_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 7, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_nllloss_NCd1d2_with_weight_reduction_mean_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 8, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_nllloss_NCd1d2d3d4d5_mean_weight_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 8, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
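The nllloss failures have yet another cause: the generated compiled_run unpacks every node's result as a tuple, while the NegativeLogLikelihoodLoss kernel apparently returns a bare numpy.float32 once the loss reduces to a scalar. A toy illustration of the mismatch; both functions are made up:
<<<
import numpy as np

def run_scalar():
    return np.float32(0.5)       # bare scalar, not iterable

def run_tuple():
    return (np.float32(0.5),)    # 1-tuple, what the generated code expects

try:
    loss, = run_scalar()         # cannot unpack non-iterable numpy.float32 object
except TypeError as e:
    print(e)
loss, = run_tuple()              # unpacks fine
>>>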
======================================================================
ERROR: test_nonmaxsuppression_center_point_box_format_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'NonMaxSuppression' from domain '' has no runtime yet. Available list:
- … (same list of available operators as above)
======================================================================
ERROR: test_nonmaxsuppression_flipped_coordinates_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'NonMaxSuppression' from domain '' has no runtime yet. Available list:
- … (same list of available operators as above)
======================================================================
ERROR: test_nonmaxsuppression_identical_boxes_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'NonMaxSuppression' from domain '' has no runtime yet. Available list:
- … (same list of available operators as above)
======================================================================
ERROR: test_nonmaxsuppression_limit_output_size_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'NonMaxSuppression' from domain '' has no runtime yet. Available list:
- … (same list of available operators as above)
======================================================================
ERROR: test_nonmaxsuppression_single_box_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'NonMaxSuppression' from domain '' has no runtime yet. Available list:
- … (same list of available operators as above)
======================================================================
ERROR: test_nonmaxsuppression_suppress_by_IOU_and_scores_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'NonMaxSuppression' from domain '' has no runtime yet. Available list:
- … (same list of available operators as above)
======================================================================
ERROR: test_nonmaxsuppression_suppress_by_IOU_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'NonMaxSuppression' from domain '' has no runtime yet. Available list:
- … (same list of available operators as above)
======================================================================
ERROR: test_nonmaxsuppression_two_batches_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'NonMaxSuppression' from domain '' has no runtime yet. Available list:
- … (same list of available operators as above)
======================================================================
ERROR: test_nonmaxsuppression_two_classes_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'NonMaxSuppression' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_nonzero_example_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'NonZero' from domain '' has no runtime yet. Available list:
- (same operator list as above)
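
'NonZero' is the simplest of the missing kernels: the ONNX spec defines it exactly as numpy's nonzero, returning the indices of the non-zero elements as an int64 tensor of shape [rank, n]. A minimal sketch of the computation a python runtime would have to provide:

<<<
import numpy as np

def nonzero_reference(x):
    # ONNX NonZero: row-stacked indices of the non-zero entries,
    # as int64, shape [rank(x), number_of_nonzeros].
    return np.array(np.nonzero(x), dtype=np.int64)

print(nonzero_reference(np.array([[1, 0], [1, 1]])))
# [[0 1 1]
#  [0 0 1]]
>>>
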
======================================================================
ERROR: test_onehot_negative_indices_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'OneHot' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_onehot_with_axis_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'OneHot' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_onehot_with_negative_axis_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'OneHot' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_onehot_without_axis_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'OneHot' from domain '' has no runtime yet. Available list:
- (same operator list as above)
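
The onehot_* failures above all trace back to the same absent OneHot kernel. Per the ONNX spec, the output carries values[1] at the positions designated by indices along the inserted depth axis and values[0] everywhere else, with negative indices counted from the end. A minimal numpy sketch, under the assumption that indices are within [-depth, depth-1]:

<<<
import numpy as np

def onehot_reference(indices, depth, values, axis=-1):
    # ONNX OneHot sketch: values = [off_value, on_value].
    depth = int(depth)
    idx = np.asarray(indices).astype(np.int64)
    idx = np.where(idx < 0, idx + depth, idx)      # negative indices wrap
    hot = np.eye(depth, dtype=np.asarray(values).dtype)[idx]
    if axis != -1:
        hot = np.moveaxis(hot, -1, axis)           # place the depth axis
    off, on = values
    return hot * (on - off) + off

print(onehot_reference(np.array([1, -1]), 3, np.array([0.0, 1.0])))
# [[0. 1. 0.]
#  [0. 0. 1.]]
>>>
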
======================================================================
ERROR: test_optional_get_element_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
self.graph_ = self.to_sequence(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
variables[obj.name] = _var_as_dict(obj)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 419, in _var_as_dict
dtype['optional'] = _var_as_dict(optional)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
raise NotImplementedError( # pragma: no cover
NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.Optional'> value is 'elem_type {\n tensor_type {\n elem_type: 1\n shape {\n dim {\n dim_value: 4\n }\n }\n }\n}\n'.
======================================================================
ERROR: test_optional_get_element_sequence_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
self.graph_ = self.to_sequence(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
variables[obj.name] = _var_as_dict(obj)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 419, in _var_as_dict
dtype['optional'] = _var_as_dict(optional)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
raise NotImplementedError( # pragma: no cover
NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.Optional'> value is 'elem_type {\n sequence_type {\n elem_type {\n tensor_type {\n elem_type: 6\n shape {\n dim {\n dim_value: 4\n }\n }\n }\n }\n }\n}\n'.
======================================================================
ERROR: test_optional_has_element_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
self.graph_ = self.to_sequence(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
variables[obj.name] = _var_as_dict(obj)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 419, in _var_as_dict
dtype['optional'] = _var_as_dict(optional)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
raise NotImplementedError( # pragma: no cover
NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.Optional'> value is 'elem_type {\n tensor_type {\n elem_type: 1\n shape {\n dim {\n dim_value: 4\n }\n }\n }\n}\n'.
======================================================================
ERROR: test_optional_has_element_empty_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
self.graph_ = self.to_sequence(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
variables[obj.name] = _var_as_dict(obj)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 419, in _var_as_dict
dtype['optional'] = _var_as_dict(optional)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
raise NotImplementedError( # pragma: no cover
NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.Optional'> value is 'elem_type {\n tensor_type {\n elem_type: 6\n shape {\n }\n }\n}\n'.
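
The optional_* failures are of a different nature: no operator is missing; _var_as_dict raises while translating the model's inputs because it does not recognize the Optional type introduced with opset 15. The protobuf it rejects can be rebuilt in a few lines with onnx.helper (a sketch, assuming the make_optional_type_proto helper shipped with onnx 1.10+):

<<<
from onnx import helper, TensorProto

# Rebuild the value info the runtime fails to translate:
# an optional tensor(float) of shape [4].
tensor_t = helper.make_tensor_type_proto(TensorProto.FLOAT, [4])
optional_t = helper.make_optional_type_proto(tensor_t)
value_info = helper.make_value_info('input', optional_t)
print(value_info.type)
# optional_type { elem_type { tensor_type { elem_type: 1 ... } } }
>>>
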
======================================================================
ERROR: test_qlinearmatmul_2D_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'QLinearMatMul' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_qlinearmatmul_3D_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'QLinearMatMul' from domain '' has no runtime yet. Available list:
- (same operator list as above)
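
The two qlinearmatmul failures point at another missing kernel. Following the ONNX operator definition, QLinearMatMul multiplies the dequantized inputs and requantizes the product with the output scale and zero point; a minimal numpy sketch of that arithmetic for the uint8 case:

<<<
import numpy as np

def qlinearmatmul_reference(a, a_scale, a_zp, b, b_scale, b_zp,
                            y_scale, y_zp):
    # Dequantize, multiply, requantize (uint8 variant, saturating).
    real = (a.astype(np.int32) - int(a_zp)) @ (b.astype(np.int32) - int(b_zp))
    y = np.rint(real * (a_scale * b_scale / y_scale)) + int(y_zp)
    return np.clip(y, 0, 255).astype(np.uint8)
>>>
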
======================================================================
ERROR: test_range_float_type_positive_delta_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 317, in preprocess_parameters
sess = rt_class(v['value'], runtime=runtime,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
self.graph_ = self.to_sequence(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
variables[obj.name] = _var_as_dict(obj)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
raise NotImplementedError( # pragma: no cover
NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.ValueInfoProto'> value is 'name: "prev"\n'.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 221, in setup_runtime
self.preprocess_parameters(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 323, in preprocess_parameters
raise RuntimeError(
RuntimeError: Unable to instantiate a node of type 'Loop' and name ''.
======================================================================
ERROR: test_range_int32_type_negative_delta_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 317, in preprocess_parameters
sess = rt_class(v['value'], runtime=runtime,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 187, in _init
self.graph_ = self.to_sequence(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 595, in to_sequence
variables[obj.name] = _var_as_dict(obj)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnx_tools/onnx2py_helper.py", line 488, in _var_as_dict
raise NotImplementedError( # pragma: no cover
NotImplementedError: Unable to guess which object it is type is <class 'onnx.onnx_ml_pb2.ValueInfoProto'> value is 'name: "prev"\n'.
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 221, in setup_runtime
self.preprocess_parameters(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 323, in preprocess_parameters
raise RuntimeError(
RuntimeError: Unable to instantiate a node of type 'Loop' and name ''.
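
Both range_*_expanded failures come from the body graph of the expanded Loop: the subgraph declares a value info that carries a name but no type, and _var_as_dict gives up before the Loop node can be instantiated. The rejected object is easy to reproduce:

<<<
from onnx import ValueInfoProto

v = ValueInfoProto()
v.name = 'prev'
print(v)                    # name: "prev"  -- no type field at all
print(v.HasField('type'))   # False: nothing for _var_as_dict to classify
>>>
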
======================================================================
ERROR: test_reshape_allowzero_reordered_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 213, in _init
raise RuntimeError( # pragma: no cover
RuntimeError: Wrong ONNX file, one input or output has an empty shape: name: "data"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 0
}
dim {
dim_value: 3
}
dim {
dim_value: 4
}
}
}
}
.
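
Here the model is valid but rejected: the reshape_allowzero tests declare an input with a literal zero dimension (the point of allowzero=1), and the input check apparently treats dim_value: 0 the same as a missing shape. The offending input can be reconstructed in one call (a sketch):

<<<
from onnx import helper, TensorProto

# A tensor type with an explicit zero dimension, as used by the
# reshape_allowzero tests.
data = helper.make_tensor_value_info('data', TensorProto.FLOAT, [0, 3, 4])
print(data.type.tensor_type.shape)
>>>
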
======================================================================
ERROR: test_resize_downsample_scales_cubic_A_n0p5_exclude_outside_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_resize_downsample_scales_cubic_align_corners_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_resize_downsample_scales_cubic_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
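All these failures share a single cause: the compiled python runtime has no implementation registered for the standard Resize operator, so OnnxInference raises MissingOperatorError while building the execution graph, before any test input is run. The following snippet is a minimal sketch that reproduces the failure outside the test runner; it assumes nothing beyond the public onnx helper API and the OnnxInference constructor visible in the traceback above.

<<<
import numpy
from onnx import TensorProto
from onnx.helper import (
    make_graph, make_model, make_node, make_tensor_value_info)
from onnx.numpy_helper import from_array
from mlprodict.onnxrt import OnnxInference

# One-node graph: Resize by a factor 2 on the two spatial axes.
# The empty string stands for the optional 'roi' input.
X = make_tensor_value_info('X', TensorProto.FLOAT, [1, 1, 4, 4])
Y = make_tensor_value_info('Y', TensorProto.FLOAT, None)
scales = from_array(
    numpy.array([1., 1., 2., 2.], dtype=numpy.float32), name='scales')
node = make_node('Resize', ['X', '', 'scales'], ['Y'], mode='nearest')
model = make_model(make_graph([node], 'g', [X], [Y], [scales]))

try:
    OnnxInference(model, runtime='python_compiled')
except Exception as e:
    # MissingOperatorError: Operator 'Resize' from domain ''
    # has no runtime yet. Available list: ...
    print(type(e).__name__)
>>>

Until a runtime for Resize is implemented, one possible workaround is to run such models through the onnxruntime-backed runtime instead, for instance OnnxInference(model, runtime='onnxruntime1'), assuming onnxruntime supports Resize at the model's opset.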
======================================================================
ERROR: test_resize_downsample_scales_linear_align_corners_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_resize_downsample_scales_linear_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_resize_downsample_scales_nearest_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_resize_downsample_sizes_cubic_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_resize_downsample_sizes_linear_pytorch_half_pixel_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_resize_downsample_sizes_nearest_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_resize_downsample_sizes_nearest_tf_half_pixel_for_nn_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_resize_tf_crop_and_resize_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_resize_upsample_scales_cubic_A_n0p5_exclude_outside_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_resize_upsample_scales_cubic_align_corners_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_resize_upsample_scales_cubic_asymmetric_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_resize_upsample_scales_cubic_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_resize_upsample_scales_linear_align_corners_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_resize_upsample_scales_linear_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_resize_upsample_scales_nearest_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- ... (same operator list as above)
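All six test_resize_* failures above, as well as the ReverseSequence, RoiAlign, Scatter and ScatterND failures reported further down, share the same root cause: the python_compiled runtime has no implementation registered for the operator, so load_op raises MissingOperatorError while the model is being prepared, before anything is executed. Until such a runtime exists, the corresponding tests can be filtered out with the same exclude mechanism used for the model zoo tests. A minimal sketch; the regular expressions are assumptions derived from the test names in this report, not patterns shipped with mlprodict:

<<<
# Hypothetical extra filters: skip node tests whose operator has no
# runtime yet in mlprodict's python_compiled runtime.
for pattern in ['.*_resize_.*', '.*_reversesequence_.*', '.*_roialign_.*',
                '.*_scatter_with.*', '.*_scatter_without.*',
                '.*_scatternd_.*']:
    back_test.exclude(pattern)
>>>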
======================================================================
ERROR: test_resize_upsample_sizes_cubic_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- ... (same operator list as above)
======================================================================
ERROR: test_resize_upsample_sizes_nearest_ceil_half_pixel_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- ... (same operator list as above)
======================================================================
ERROR: test_resize_upsample_sizes_nearest_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- ... (same operator list as above)
======================================================================
ERROR: test_resize_upsample_sizes_nearest_floor_align_corners_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- ... (same operator list as above)
======================================================================
ERROR: test_resize_upsample_sizes_nearest_round_prefer_ceil_asymmetric_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Resize' from domain '' has no runtime yet. Available list:
- ... (same operator list as above)
======================================================================
ERROR: test_reversesequence_batch_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'ReverseSequence' from domain '' has no runtime yet. Available list:
- ... (same operator list as above)
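ReverseSequence has no runtime either, but the computation itself is easy to state: for batch entry i, the first sequence_lens[i] steps along time_axis are reversed and the remaining steps are copied unchanged. A minimal numpy sketch of what such a runtime would have to compute, using the ONNX defaults time_axis=0 and batch_axis=1; this is an illustration, not mlprodict's API:

<<<
import numpy as np

def reverse_sequence(x, sequence_lens, batch_axis=1, time_axis=0):
    # Reverse the first sequence_lens[i] steps of batch entry i along
    # time_axis; steps beyond sequence_lens[i] are left untouched.
    # The ONNX specification requires sequence_lens entries >= 1.
    y = x.copy()
    for i, n in enumerate(sequence_lens):
        src = [slice(None)] * x.ndim
        dst = [slice(None)] * x.ndim
        src[batch_axis] = dst[batch_axis] = i
        src[time_axis] = slice(n - 1, None, -1)  # steps n-1, ..., 0
        dst[time_axis] = slice(0, n)
        y[tuple(dst)] = x[tuple(src)]
    return y
>>>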
======================================================================
ERROR: test_reversesequence_time_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'ReverseSequence' from domain '' has no runtime yet. Available list:
- ... (same operator list as above)
======================================================================
ERROR: test_rnn_seq_length_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 118, in load_op
return cl(onnx_node, desc=desc, runtme=runtime, **options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_rnn.py", line 156, in __init__
CommonRNN.__init__(self, onnx_node, desc=desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_rnn.py", line 40, in __init__
self.f1 = self.choose_act(self.activations[0],
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_rnn.py", line 57, in choose_act
raise RuntimeError( # pragma: no cover
RuntimeError: Unknown activation function 'tanh'.
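This failure is different: an RNN runtime does exist, but its activation lookup rejects the name 'tanh'. The ONNX specification spells activation names with an initial capital ('Tanh', 'Relu', 'Sigmoid'), so the most plausible explanation is a case mismatch between the name carried by the model (or the runtime's default) and the keys of the runtime's activation table. Note also the runtme=runtime keyword visible in the quoted load_op frame, which looks like an unrelated typo in the quoted source. A hedged sketch of a case-insensitive lookup follows; choose_act and the table below are illustrations, not mlprodict's actual attributes:

<<<
import numpy as np

_ACTIVATIONS = {
    'tanh': np.tanh,
    'relu': lambda x: np.maximum(x, 0.),
    'sigmoid': lambda x: 1. / (1. + np.exp(-x)),
}

def choose_act(name):
    # Normalise the case so that 'Tanh', 'tanh' and 'TANH' all resolve
    # to the same implementation.
    try:
        return _ACTIVATIONS[name.lower()]
    except KeyError:
        raise RuntimeError("Unknown activation function %r." % name)
>>>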
======================================================================
ERROR: test_roialign_aligned_false_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'RoiAlign' from domain '' has no runtime yet. Available list:
- ... (same operator list as above)
======================================================================
ERROR: test_roialign_aligned_true_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'RoiAlign' from domain '' has no runtime yet. Available list:
- ... (same operator list as above)
======================================================================
ERROR: test_scan_sum_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
(y, z, ) = n0_scan(, initial, x)
                   ^
SyntaxError: invalid syntax
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    initial = dict_inputs['initial']
    x = dict_inputs['x']
    (y, z, ) = n0_scan(, initial, x)
    return {
        'y': y,
        'z': z,
    }
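Here the model is rejected while compiling the Python code that the python_compiled runtime generates for the graph: the call (y, z, ) = n0_scan(, initial, x) has an empty first argument. In ONNX, an omitted optional input is encoded as an empty string in the node's input list (Scan in opset 8 had an optional sequence_lens first input), so one plausible explanation is that the code generator joins the input names without filtering out empty ones. A minimal sketch of that fix, under the assumption that the generator holds the node's input names in a list:

<<<
# '' marks an omitted optional input in ONNX and must not turn into an
# empty positional argument in the generated call.
inputs = ['', 'initial', 'x']           # as in the failing Scan node
args = ', '.join(n for n in inputs if n != '')
print('(y, z, ) = n0_scan(%s)' % args)  # (y, z, ) = n0_scan(initial, x)
>>>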
======================================================================
ERROR: test_scatter_elements_without_axis_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 8, in compiled_run
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_scatter_elements.py", line 78, in _run
res = scatter_elements(data, indices, updates, axis=self.axis)
AttributeError: 'ScatterElements' object has no attribute 'axis'
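The ScatterElements runtime exists, but _run reads self.axis unconditionally while this test model omits the optional axis attribute, whose ONNX default is 0, so the attribute was never set on the operator object. The cleaner fix is to declare the default among the operator's expected attributes so it is always populated; a tolerant fallback at the call site would be a one-line change. A sketch of that line, not the actual mlprodict code:

<<<
# Fall back to the ONNX default when the attribute is absent.
axis = getattr(self, 'axis', 0)
res = scatter_elements(data, indices, updates, axis=axis)
>>>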
======================================================================
ERROR: test_scatter_with_axis_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Scatter' from domain '' has no runtime yet. Available list:
- ... (same operator list as above)
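Scatter was deprecated in ONNX opset 11 in favour of ScatterElements, which takes the same data, indices and updates inputs and the same axis attribute. Rather than adding a runtime for the deprecated name, such models can be rewritten before loading. A sketch; the file name is hypothetical, and depending on the declared opset the ai.onnx import may also have to be raised to 11 or more for the model to stay valid:

<<<
import onnx

model = onnx.load('model_with_scatter.onnx')  # hypothetical file
for node in model.graph.node:
    if node.op_type == 'Scatter' and node.domain in ('', 'ai.onnx'):
        # ScatterElements has the same inputs and the same 'axis' attribute.
        node.op_type = 'ScatterElements'
>>>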
======================================================================
ERROR: test_scatter_without_axis_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Scatter' from domain '' has no runtime yet. Available list:
- ... (same operator list as above)
======================================================================
ERROR: test_scatternd_add_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'ScatterND' from domain '' has no runtime yet. Available list:
- ... (same operator list as above)
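ScatterND is equally simple to express in numpy; the loop below follows the reference implementation given in the ONNX documentation and is shown only to illustrate what a runtime would have to compute. test_scatternd_add would additionally need the reduction='add' variant introduced in later opsets, which accumulates with += instead of assigning:

<<<
import numpy as np

def scatter_nd(data, indices, updates):
    # Copy data, then write each update at the position addressed by
    # the index tuple stored in the last dimension of indices.
    output = np.copy(data)
    for idx in np.ndindex(indices.shape[:-1]):
        output[tuple(indices[idx])] = updates[idx]
    return output
>>>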
======================================================================
ERROR: test_scatternd_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'ScatterND' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_scatternd_multiply_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'ScatterND' from domain '' has no runtime yet. Available list:
- (same operator list as above)
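The two ScatterND failures above are missing-kernel errors: the compiled Python
runtime has no implementation registered for ScatterND, so prepare fails before
anything runs. As a rough illustration of what such a kernel has to compute,
here is a minimal numpy sketch (not mlprodict's implementation, default
reduction only):
<<<
import numpy

def scatter_nd(data, indices, updates):
    # ONNX ScatterND with reduction='none': copy `data`, then write each
    # update at the coordinates given by the last axis of `indices`.
    output = numpy.copy(data)
    for coord in numpy.ndindex(*indices.shape[:-1]):
        output[tuple(indices[coord])] = updates[coord]
    return output

data = numpy.array([1, 2, 3, 4, 5, 6, 7, 8], dtype=numpy.float32)
indices = numpy.array([[4], [3], [1], [7]], dtype=numpy.int64)
updates = numpy.array([9, 10, 11, 12], dtype=numpy.float32)
print(scatter_nd(data, indices, updates))
# [ 1. 11.  3. 10.  9.  6.  7. 12.]
>>>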
======================================================================
ERROR: test_sce_NCd1_mean_weight_negative_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 15, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_NCd1_mean_weight_negative_ii_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 16, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_NCd1d2d3d4d5_mean_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 15, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_NCd1d2d3d4d5_mean_weight_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 16, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_mean_no_weight_ii_3d_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 14, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_mean_no_weight_ii_3d_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 15, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_mean_no_weight_ii_4d_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 14, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_mean_no_weight_ii_4d_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 15, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_mean_no_weight_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 14, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_mean_no_weight_ii_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 15, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_mean_weight_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 15, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_mean_weight_ii_3d_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 15, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_mean_weight_ii_3d_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 16, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_mean_weight_ii_4d_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 15, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_mean_weight_ii_4d_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 16, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_mean_weight_ii_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 15, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_mean_weight_ii_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 16, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
======================================================================
ERROR: test_sce_mean_weight_log_prob_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 16, in compiled_run
TypeError: cannot unpack non-iterable numpy.float32 object
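All the test_sce_*_expanded failures above share one symptom: the function
generated by the 'python_compiled' runtime (compiled_run, built as a string,
hence the "<string>" frames) unpacks each node's return value as a tuple, and
one kernel in the expanded SoftmaxCrossEntropyLoss graph returns a bare
numpy.float32 instead. A minimal reproduction of that unpacking error, assuming
this return-a-tuple convention (hypothetical kernel, not mlprodict code):
<<<
import numpy

def broken_kernel(x):
    # By convention a kernel returns a tuple, one item per ONNX output.
    # Returning a bare scalar breaks the unpacking in the generated code.
    return numpy.float32(x.sum())    # should be (numpy.float32(x.sum()),)

res, = broken_kernel(numpy.ones(3, dtype=numpy.float32))
# TypeError: cannot unpack non-iterable numpy.float32 object
>>>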
======================================================================
ERROR: test_shrink_hard_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Shrink' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_shrink_soft_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Shrink' from domain '' has no runtime yet. Available list:
- (same operator list as above)
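test_shrink_hard_cpu and test_shrink_soft_cpu fail for the same reason as
ScatterND: no kernel is registered for Shrink. The operator itself is a small
elementwise map, y = x + bias where x < -lambd, y = x - bias where x > lambd,
and 0 otherwise; a minimal numpy sketch (not mlprodict's implementation):
<<<
import numpy

def shrink(x, bias=0.0, lambd=0.5):
    # ONNX Shrink: hard shrink uses the default bias=0,
    # soft shrink sets bias=lambd.
    return numpy.where(x < -lambd, x + bias,
                       numpy.where(x > lambd, x - bias, 0)).astype(x.dtype)

x = numpy.array([-2.0, -0.5, 0.5, 2.0], dtype=numpy.float32)
print(shrink(x, lambd=1.5))            # hard: [-2.  0.  0.  2.]
print(shrink(x, bias=1.5, lambd=1.5))  # soft: [-0.5  0.   0.   0.5]
>>>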
======================================================================
ERROR: test_simple_rnn_batchwise_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 118, in load_op
return cl(onnx_node, desc=desc, runtme=runtime, **options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_rnn.py", line 156, in __init__
CommonRNN.__init__(self, onnx_node, desc=desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_rnn.py", line 40, in __init__
self.f1 = self.choose_act(self.activations[0],
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_rnn.py", line 57, in choose_act
raise RuntimeError( # pragma: no cover
RuntimeError: Unknown activation function 'tanh'.
======================================================================
ERROR: test_simple_rnn_defaults_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 118, in load_op
return cl(onnx_node, desc=desc, runtme=runtime, **options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_rnn.py", line 156, in __init__
CommonRNN.__init__(self, onnx_node, desc=desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_rnn.py", line 40, in __init__
self.f1 = self.choose_act(self.activations[0],
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_rnn.py", line 57, in choose_act
raise RuntimeError( # pragma: no cover
RuntimeError: Unknown activation function 'tanh'.
======================================================================
ERROR: test_simple_rnn_with_initial_bias_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 118, in load_op
return cl(onnx_node, desc=desc, runtme=runtime, **options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_rnn.py", line 156, in __init__
CommonRNN.__init__(self, onnx_node, desc=desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_rnn.py", line 40, in __init__
self.f1 = self.choose_act(self.activations[0],
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_rnn.py", line 57, in choose_act
raise RuntimeError( # pragma: no cover
RuntimeError: Unknown activation function 'tanh'.
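The three simple_rnn failures are not missing operators: the RNN kernel exists,
but its activation lookup (choose_act in op_rnn.py) rejects the lower-case
spelling 'tanh' while the ONNX operator is spelled 'Tanh', which points at a
case-sensitivity mismatch between the lookup table and the attribute value. A
case-insensitive lookup is one plausible fix; a sketch, not mlprodict's code:
<<<
import numpy

# Sketch of a case-insensitive activation table; 'Tanh' (the ONNX
# spelling) and 'tanh' would both resolve to the same function.
_ACTIVATIONS = {
    'relu': lambda x: numpy.maximum(x, 0),
    'sigmoid': lambda x: 1 / (1 + numpy.exp(-x)),
    'tanh': numpy.tanh,
}

def choose_act(name):
    try:
        return _ACTIVATIONS[name.lower()]
    except KeyError:
        raise RuntimeError("Unknown activation function %r." % name)

print(choose_act('Tanh')(numpy.float32(0.5)))
>>>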
======================================================================
ERROR: test_slice_start_out_of_bounds_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 213, in _init
raise RuntimeError( # pragma: no cover
RuntimeError: Wrong ONNX file, one input or output has an empty shape: name: "y"
type {
tensor_type {
elem_type: 1
shape {
dim {
dim_value: 20
}
dim {
dim_value: 0
}
dim {
dim_value: 5
}
}
}
}
.
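test_slice_start_out_of_bounds is rejected at load time rather than at
execution: slicing past the end of an axis legally produces a tensor with a
zero-sized dimension, so the model declares its output y with dim_value: 0,
which OnnxInference._init treats as a malformed shape. numpy produces the same
empty result:
<<<
import numpy

x = numpy.random.randn(20, 10, 5).astype(numpy.float32)
y = x[:, 1000:1000]   # start beyond the end of axis 1
print(y.shape)        # (20, 0, 5): an empty tensor is a legal Slice output
>>>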
======================================================================
ERROR: test_softplus_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Softplus' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_softplus_example_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Softplus' from domain '' has no runtime yet. Available list:
- (same operator list as above)
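Softplus here, and Softsign in the failures that follow, are further
missing-kernel errors. Both are simple elementwise maps; minimal numpy sketches
(not mlprodict's implementations):
<<<
import numpy

def softplus(x):
    # ONNX Softplus: y = ln(1 + exp(x)); log1p keeps small inputs accurate.
    return numpy.log1p(numpy.exp(x)).astype(x.dtype)

def softsign(x):
    # ONNX Softsign: y = x / (1 + |x|).
    return (x / (1 + numpy.abs(x))).astype(x.dtype)

t = numpy.array([-1.0, 0.0, 1.0], dtype=numpy.float32)
print(softplus(t))  # approximately [0.3133 0.6931 1.3133]
print(softsign(t))  # [-0.5  0.   0.5]
>>>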
======================================================================
ERROR: test_softsign_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Softsign' from domain '' has no runtime yet. Available list:
- (same operator list as above)
======================================================================
ERROR: test_softsign_example_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Softsign' from domain '' has no runtime yet. Available list:
- ... (identical list of available operators omitted; see the first MissingOperatorError above)
======================================================================
ERROR: test_spacetodepth_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'SpaceToDepth' from domain '' has no runtime yet. Available list:
- ... (identical list of available operators omitted; see the first MissingOperatorError above)
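SpaceToDepth is missing for the same reason. Following the ONNX reference semantics, it moves blocksize x blocksize spatial blocks into the channel axis; a minimal numpy sketch (names are illustrative):
<<<
import numpy

def space_to_depth(x, blocksize):
    # x has shape (N, C, H, W); H and W must be multiples of blocksize.
    n, c, h, w = x.shape
    b = blocksize
    tmp = x.reshape(n, c, h // b, b, w // b, b)
    tmp = tmp.transpose(0, 3, 5, 1, 2, 4)
    return tmp.reshape(n, c * b * b, h // b, w // b)

x = numpy.arange(16).reshape(1, 1, 4, 4)
print(space_to_depth(x, 2).shape)  # (1, 4, 2, 2)
>>>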
======================================================================
ERROR: test_spacetodepth_example_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'SpaceToDepth' from domain '' has no runtime yet. Available list:
- ... (identical list of available operators omitted; see the first MissingOperatorError above)
======================================================================
ERROR: test_tfidfvectorizer_tf_only_bigrams_skip0_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 6, in compiled_run
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_tfidfvectorizer.py", line 54, in _run
return (res.reshape((x.shape[0], -1)), )
ValueError: cannot reshape array of size 7 into shape (12,newaxis)
======================================================================
ERROR: test_tfidfvectorizer_tf_onlybigrams_levelempty_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 6, in compiled_run
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_tfidfvectorizer.py", line 54, in _run
return (res.reshape((x.shape[0], -1)), )
ValueError: cannot reshape array of size 3 into shape (12,newaxis)
======================================================================
ERROR: test_tfidfvectorizer_tf_onlybigrams_skip5_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 6, in compiled_run
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_tfidfvectorizer.py", line 54, in _run
return (res.reshape((x.shape[0], -1)), )
ValueError: cannot reshape array of size 7 into shape (12,newaxis)
======================================================================
ERROR: test_tfidfvectorizer_tf_uniandbigrams_skip5_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 320, in run
outputs = list(prepared_model.run(inputs))
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 83, in run
outs = self._session.run(feeds)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 875, in run
return self._run(inputs, clean_right_away=False,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 312, in _run_sequence_runtime_compiled
return self._run_compiled( # pylint: disable=E1101
File "<string>", line 6, in compiled_run
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/op_tfidfvectorizer.py", line 54, in _run
return (res.reshape((x.shape[0], -1)), )
ValueError: cannot reshape array of size 7 into shape (12,newaxis)
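The four TfIdfVectorizer failures above are different: the kernel exists, but `op_tfidfvectorizer.py` unconditionally reshapes its result to `(x.shape[0], -1)`. These tests feed a 1-D sequence of 12 tokens, so `x.shape[0]` is 12 while the n-gram counts have size 7 (or 3), and the reshape fails; per the ONNX spec a 1-D input should produce a 1-D output. A hedged sketch of the guard one would expect (variable names follow the traceback; the fix itself is hypothetical):
<<<
# sketch of the final lines of TfIdfVectorizer._run, not the actual code
if len(x.shape) > 1:
    # batched 2-D input: one row of n-gram counts per sample
    return (res.reshape((x.shape[0], -1)), )
# 1-D input: the output stays a flat vector of n-gram counts
return (res, )
>>>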
======================================================================
ERROR: test_thresholdedrelu_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'ThresholdedRelu' from domain '' has no runtime yet. Available list:
- ... (identical list of available operators omitted; see the first MissingOperatorError above)
======================================================================
ERROR: test_thresholdedrelu_default_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'ThresholdedRelu' from domain '' has no runtime yet. Available list:
- ... (identical list of available operators omitted; see the first MissingOperatorError above)
======================================================================
ERROR: test_thresholdedrelu_example_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'ThresholdedRelu' from domain '' has no runtime yet. Available list:
- ... (identical list of available operators omitted; see the first MissingOperatorError above)
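ThresholdedRelu (three failures above) is another elementwise one-liner: y = x where x > alpha, else 0, with alpha defaulting to 1.0. A minimal numpy sketch:
<<<
import numpy

def thresholded_relu(x, alpha=1.0):
    # ONNX ThresholdedRelu: y = x if x > alpha else 0 (alpha defaults to 1.0).
    return numpy.where(x > alpha, x, numpy.zeros_like(x))

print(thresholded_relu(numpy.array([-1.5, 0.5, 1.5])))  # [0.  0.  1.5]
>>>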
======================================================================
ERROR: test_tile_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Tile' from domain '' has no runtime yet. Available list:
- ... (identical list of available operators omitted; see the first MissingOperatorError above)
======================================================================
ERROR: test_tile_precomputed_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Tile' from domain '' has no runtime yet. Available list:
- ... (identical list of available operators omitted; see the first MissingOperatorError above)
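Tile maps directly onto `numpy.tile`; the only ONNX-specific detail is that the repeats arrive as a second tensor input with one entry per axis. A sketch:
<<<
import numpy

def tile(x, repeats):
    # ONNX Tile: repeat x along each axis; repeats has one entry per axis.
    return numpy.tile(x, tuple(repeats))

x = numpy.array([[0, 1], [2, 3]])
print(tile(x, numpy.array([2, 2])))  # 4x4 block-repeated result
>>>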
======================================================================
ERROR: test_tril_zero_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 213, in _init
raise RuntimeError( # pragma: no cover
RuntimeError: Wrong ONNX file, one input or output has an empty shape: name: "x"
type {
tensor_type {
elem_type: 7
shape {
dim {
dim_value: 3
}
dim {
dim_value: 0
}
dim {
dim_value: 5
}
}
}
}
.
======================================================================
ERROR: test_triu_zero_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 213, in _init
raise RuntimeError( # pragma: no cover
RuntimeError: Wrong ONNX file, one input or output has an empty shape: name: "x"
type {
tensor_type {
elem_type: 7
shape {
dim {
dim_value: 0
}
dim {
dim_value: 5
}
}
}
}
.
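The two Trilu failures above are not missing kernels: the models are valid ONNX, but one input declares a zero-sized dimension (`dim_value: 0`), and `OnnxInference._init` rejects any input or output with an empty shape. numpy itself handles zero-sized tensors without complaint, so the check is stricter than these tests require:
<<<
import numpy

# numpy.tril/triu operate on the last two axes and accept empty arrays.
x = numpy.zeros((3, 0, 5), dtype=numpy.int64)
print(numpy.tril(x).shape)                    # (3, 0, 5)
print(numpy.triu(numpy.zeros((0, 5))).shape)  # (0, 5)
>>>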
======================================================================
ERROR: test_unique_not_sorted_without_axis_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Unique' from domain '' has no runtime yet. Available list:
- ... (identical list of available operators omitted; see the first MissingOperatorError above)
======================================================================
ERROR: test_unique_sorted_with_axis_3d_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Unique' from domain '' has no runtime yet. Available list:
- ... (identical list of available operators omitted; see the first MissingOperatorError above)
======================================================================
ERROR: test_unique_sorted_with_axis_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Unique' from domain '' has no runtime yet. Available list:
- ... (identical list of available operators omitted; see the first MissingOperatorError above)
======================================================================
ERROR: test_unique_sorted_with_negative_axis_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Unique' from domain '' has no runtime yet. Available list:
- ... (identical list of available operators omitted; see the first MissingOperatorError above)
======================================================================
ERROR: test_unique_sorted_without_axis_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Unique' from domain '' has no runtime yet. Available list:
- (same list of available operators as above)
======================================================================
ERROR: test_upsample_nearest_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Upsample' from domain '' has no runtime yet. Available list:
- (same list of available operators as above)
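'Upsample' is the other operator without a kernel in this run. For the nearest mode exercised by test_upsample_nearest_cpu the computation is small enough to write down; this numpy sketch assumes Upsample-9 semantics, where output index i along an axis reads input index floor(i / scale).
<<<
import numpy as np

def upsample_nearest(x, scales):
    # One gather per axis: output index i maps back to floor(i / scale).
    for axis, scale in enumerate(scales):
        idx = np.arange(int(round(x.shape[axis] * scale))) // scale
        x = np.take(x, idx.astype(np.int64), axis=axis)
    return x

# upsample_nearest(np.array([[1., 2.], [3., 4.]]), [1., 2.])
# -> [[1., 1., 2., 2.], [3., 3., 4., 4.]]
>>>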
======================================================================
ERROR: test_AvgPool1d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_unsqueeze_1(0)
(2, ) = n1_averagepool(1)
(3, ) = n2_squeeze_1(2)
return {
'3': 3,
}
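The second failure family has a single root cause. The python_compiled runtime translates the graph into the Python function shown above, reusing ONNX tensor names as variable names. Models converted from PyTorch name their tensors '0', '1', ..., which are not valid Python identifiers, so compile() rejects the generated source before anything runs. The failure is reproducible in isolation:
<<<
# A literal can never be an assignment target; CPython raises at compile time.
compile("0 = dict_inputs['0']", '<string>', 'exec')
# SyntaxError: cannot assign to literal
>>>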
======================================================================
ERROR: test_AvgPool1d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_AvgPool1d_cpu above)
======================================================================
ERROR: test_AvgPool2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_averagepool(0)
return {
'1': 1,
}
======================================================================
ERROR: test_AvgPool2d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_AvgPool2d_cpu above)
======================================================================
ERROR: test_AvgPool3d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_AvgPool2d_cpu above)
======================================================================
ERROR: test_AvgPool3d_stride1_pad0_gpu_input_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_AvgPool2d_cpu above)
======================================================================
ERROR: test_AvgPool3d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_AvgPool2d_cpu above)
======================================================================
ERROR: test_BatchNorm1d_3d_input_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 9
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# init: 3 (3)
# init: 4 (4)
# inputs
0 = dict_inputs['0']
(5, ) = n0_batchnormalization_14(0, 1, 2, 3, 4)
return {
'5': 5,
}
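The BatchNorm variants show that initializers ('1' through '4') carry numeric names as well, so any fix has to rename every result, not just graph inputs. A minimal mangling sketch follows; it is hypothetical, not what mlprodict actually generates.
<<<
import keyword
import re

def to_identifier(name, prefix='r_'):
    # Map an arbitrary ONNX tensor name onto a valid Python identifier.
    ident = re.sub(r'\W', '_', name)
    if not ident.isidentifier() or keyword.iskeyword(ident):
        ident = prefix + ident
    return ident

# to_identifier('0') == 'r_0', so the failing line would become:
# r_0 = dict_inputs['0']
>>>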
======================================================================
ERROR: test_BatchNorm2d_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_BatchNorm1d_3d_input_eval_cpu above)
======================================================================
ERROR: test_BatchNorm2d_momentum_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_BatchNorm1d_3d_input_eval_cpu above)
======================================================================
ERROR: test_BatchNorm3d_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_BatchNorm1d_3d_input_eval_cpu above)
======================================================================
ERROR: test_BatchNorm3d_momentum_eval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_BatchNorm1d_3d_input_eval_cpu above)
======================================================================
ERROR: test_ConstantPad2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_pad(0)
return {
'1': 1,
}
======================================================================
ERROR: test_Conv1d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
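Every remaining PyTorchConverted failure in this report repeats one of these two patterns, so the 303 errors collapse to a handful of root causes. Given the unittest result object such a run produces, a rough tally can confirm that; the regex below is a heuristic sketch, not a robust traceback parser.
<<<
import re
from collections import Counter

def failure_histogram(result):
    # Tally errors by the exception class named in each traceback.
    causes = Counter()
    for _test, tb in result.errors:
        found = re.findall(r'^([\w.]*(?:Error|Exception))\b', tb, re.M)
        causes[found[-1] if found else 'unknown'] += 1
    return causes
>>>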
======================================================================
ERROR: test_Conv1d_dilated_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_Conv1d_cpu above)
======================================================================
ERROR: test_Conv1d_groups_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_Conv1d_cpu above)
======================================================================
ERROR: test_Conv1d_pad1_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_Conv1d_cpu above)
======================================================================
ERROR: test_Conv1d_pad1size1_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_Conv1d_cpu above)
======================================================================
ERROR: test_Conv1d_pad2_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_Conv1d_cpu above)
======================================================================
ERROR: test_Conv1d_pad2size1_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_Conv1d_cpu above)
======================================================================
ERROR: test_Conv1d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_Conv1d_cpu above)
======================================================================
ERROR: test_Conv2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
(same traceback and generated code as test_Conv1d_cpu above)
======================================================================
ERROR: test_Conv2d_depthwise_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_Conv2d_depthwise_padded_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_Conv2d_depthwise_strided_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_Conv2d_depthwise_with_multiplier_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_Conv2d_dilated_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_Conv2d_groups_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_Conv2d_groups_thnn_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_Conv2d_no_bias_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# inputs
0 = dict_inputs['0']
(2, ) = n0_conv(0, 1)
return {
'2': 2,
}
======================================================================
ERROR: test_Conv2d_padding_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_Conv2d_strided_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_Conv3d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_Conv3d_dilated_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_Conv3d_dilated_strided_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_Conv3d_groups_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_Conv3d_no_bias_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# inputs
0 = dict_inputs['0']
(2, ) = n0_conv(0, 1)
return {
'2': 2,
}
======================================================================
ERROR: test_Conv3d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_Conv3d_stride_padding_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_conv(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_ConvTranspose2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# init: 2 (2)
# inputs
0 = dict_inputs['0']
(3, ) = n0_convtranspose(0, 1, 2)
return {
'3': 3,
}
======================================================================
ERROR: test_ConvTranspose2d_no_bias_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# inputs
0 = dict_inputs['0']
(2, ) = n0_convtranspose(0, 1)
return {
'2': 2,
}
======================================================================
ERROR: test_ELU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_elu(0)
return {
'1': 1,
}
======================================================================
ERROR: test_Embedding_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# inputs
0 = dict_inputs['0']
(2, ) = n0_gather(1, 0)
return {
'2': 2,
}
======================================================================
ERROR: test_Embedding_sparse_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# inputs
0 = dict_inputs['0']
(2, ) = n0_gather(1, 0)
return {
'2': 2,
}
======================================================================
ERROR: test_GLU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, 2, ) = n0_split_2(0)
(3, ) = n1_sigmoid(2)
(4, ) = n2_mul(1, 3)
return {
'4': 4,
}
======================================================================
ERROR: test_GLU_dim_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, 2, ) = n0_split_2(0)
    (3, ) = n1_sigmoid(2)
    (4, ) = n2_mul(1, 3)
    return {
        '4': 4,
    }
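Aside from the naming issue, the generated body makes the converted GLU legible: Split halves the input, Sigmoid gates one half, Mul combines them, i.e. GLU(x) = a * sigmoid(b). A numpy sketch of the same computation, for reference only (this is not the runtime's code); it applies equally to the _dim variant below:
<<<
import numpy as np

def glu(x, axis=-1):
    a, b = np.split(x, 2, axis=axis)         # n0_split_2
    return a * (1.0 / (1.0 + np.exp(-b)))    # n1_sigmoid, n2_mul

x = np.random.randn(4, 6).astype(np.float32)
print(glu(x, axis=1).shape)  # (4, 3)
>>>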
======================================================================
ERROR: test_LeakyReLU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_leakyrelu(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_LeakyReLU_with_negval_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_leakyrelu(0)
    return {
        '1': 1,
    }
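Both LeakyReLU bodies call a single node; elementwise it is x for x >= 0 and alpha * x otherwise, alpha defaulting to 0.01 in ONNX. A numpy sketch:
<<<
import numpy as np

def leaky_relu(x, alpha=0.01):
    # ONNX LeakyRelu: f(x) = x if x >= 0 else alpha * x
    return np.where(x >= 0, x, alpha * x)

print(leaky_relu(np.array([-2.0, 3.0], dtype=np.float32)))  # [-0.02, 3.]
>>>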
======================================================================
ERROR: test_Linear_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 7
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # init: 1 (1)
    # init: 2 (2)
    # inputs
    0 = dict_inputs['0']
    (3, ) = n0_gemm(0, 1, 2)
    return {
        '3': 3,
    }
======================================================================
ERROR: test_Linear_no_bias_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # init: 1 (1)
    # inputs
    0 = dict_inputs['0']
    (2, ) = n0_transpose(1)
    (3, ) = n1_matmul(0, 2)
    return {
        '3': 3,
    }
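The two Linear traces show the two usual export paths of a linear layer: with a bias the graph is a single Gemm (y = x @ W.T + b, assuming the common transB=1 attribute), without a bias it becomes Transpose followed by MatMul. A numpy sketch of both:
<<<
import numpy as np

def linear(x, w, b=None):
    y = x @ w.T                      # n0_gemm, or n0_transpose + n1_matmul
    return y if b is None else y + b

x = np.random.randn(2, 5).astype(np.float32)
w = np.random.randn(3, 5).astype(np.float32)
b = np.random.randn(3).astype(np.float32)
print(linear(x, w, b).shape, linear(x, w).shape)  # (2, 3) (2, 3)
>>>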
======================================================================
ERROR: test_LogSoftmax_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_logsoftmax(0)
    return {
        '1': 1,
    }
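n0_logsoftmax computes log(softmax(x)) along one axis; the usual numerically stable form subtracts the row maximum first. A sketch, not the runtime's implementation:
<<<
import numpy as np

def log_softmax(x, axis=-1):
    m = x.max(axis=axis, keepdims=True)  # shift for numerical stability
    e = np.exp(x - m)
    return x - m - np.log(e.sum(axis=axis, keepdims=True))

x = np.random.randn(2, 5).astype(np.float32)
print(np.exp(log_softmax(x)).sum(axis=-1))  # rows sum to 1
>>>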
======================================================================
ERROR: test_MaxPool1d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_maxpool(0)
    return {
        '1': 1,
    }
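Each MaxPool body calls one node whose kernel shape, strides and pads live in attributes the listing does not print. A naive 1-D sketch with assumed kernel and stride values; the 2d/3d variants below only raise the kernel rank:
<<<
import numpy as np

def maxpool1d(x, kernel, stride):
    n, c, w = x.shape
    out_w = (w - kernel) // stride + 1
    y = np.empty((n, c, out_w), dtype=x.dtype)
    for i in range(out_w):  # slide the window along the last axis
        y[..., i] = x[..., i * stride:i * stride + kernel].max(axis=-1)
    return y

x = np.random.randn(1, 2, 8).astype(np.float32)
print(maxpool1d(x, 2, 2).shape)  # (1, 2, 4)
>>>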
======================================================================
ERROR: test_MaxPool1d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_maxpool(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_MaxPool2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_maxpool(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_MaxPool3d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_maxpool(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_MaxPool3d_stride_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_maxpool(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_MaxPool3d_stride_padding_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_maxpool(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_PReLU_1d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # init: 1 (1)
    # inputs
    0 = dict_inputs['0']
    (2, ) = n0_prelu(0, 1)
    return {
        '2': 2,
    }
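PReLU differs from LeakyReLU in that the slope is a learned tensor, here the initializer announced by the '# init: 1 (1)' comment, broadcast against the input. A numpy sketch, which also covers the multiparam and 2d/3d variants below:
<<<
import numpy as np

def prelu(x, slope):
    # slope broadcasts: a scalar, a per-channel vector, or a full tensor
    return np.where(x >= 0, x, slope * x)

x = np.random.randn(2, 3).astype(np.float32)
print(prelu(x, np.float32(0.25)).shape)  # (2, 3)
>>>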
======================================================================
ERROR: test_PReLU_1d_multiparam_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # init: 1 (1)
    # inputs
    0 = dict_inputs['0']
    (2, ) = n0_prelu(0, 1)
    return {
        '2': 2,
    }
======================================================================
ERROR: test_PReLU_2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # init: 1 (1)
    # inputs
    0 = dict_inputs['0']
    (2, ) = n0_prelu(0, 1)
    return {
        '2': 2,
    }
======================================================================
ERROR: test_PReLU_2d_multiparam_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # init: 1 (1)
    # inputs
    0 = dict_inputs['0']
    (2, ) = n0_prelu(0, 1)
    return {
        '2': 2,
    }
======================================================================
ERROR: test_PReLU_3d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # init: 1 (1)
    # inputs
    0 = dict_inputs['0']
    (2, ) = n0_prelu(0, 1)
    return {
        '2': 2,
    }
======================================================================
ERROR: test_PReLU_3d_multiparam_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # init: 1 (1)
    # inputs
    0 = dict_inputs['0']
    (2, ) = n0_prelu(0, 1)
    return {
        '2': 2,
    }
======================================================================
ERROR: test_PixelShuffle_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_constant_12()
    (2, ) = n1_reshape_5(0, 1)
    (3, ) = n2_transpose(2)
    (4, ) = n3_constant_12()
    (5, ) = n4_reshape_5(3, 4)
    return {
        '5': 5,
    }
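The PixelShuffle body is the classic Reshape -> Transpose -> Reshape decomposition; the two Constant nodes hold the intermediate and final shapes. A numpy sketch for an upscale factor r (r itself is an assumption, since the constants are not printed):
<<<
import numpy as np

def pixel_shuffle(x, r):
    n, c, h, w = x.shape                        # c must be divisible by r*r
    y = x.reshape(n, c // (r * r), r, r, h, w)  # n1_reshape_5
    y = y.transpose(0, 1, 4, 2, 5, 3)           # n2_transpose
    return y.reshape(n, c // (r * r), h * r, w * r)  # n4_reshape_5

x = np.random.randn(1, 8, 4, 4).astype(np.float32)
print(pixel_shuffle(x, 2).shape)  # (1, 2, 8, 8)
>>>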
======================================================================
ERROR: test_PoissonNLLLLoss_no_reduce_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_constant_12()
    (2, ) = n1_exp(0)
    (3, ) = n2_mul(1, 0)
    (4, ) = n3_sub(2, 3)
    return {
        '4': 4,
    }
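The PoissonNLLLoss body computes exp(x) - c * x with the target c baked in as a Constant: the unreduced Poisson negative log-likelihood with log_input=True, without the optional Stirling term. A sketch:
<<<
import numpy as np

def poisson_nll_no_reduce(log_input, target):
    # n1_exp, n2_mul, n3_sub
    return np.exp(log_input) - target * log_input

x = np.random.randn(3, 4).astype(np.float32)
t = np.random.poisson(2.0, size=(3, 4)).astype(np.float32)
print(poisson_nll_no_reduce(x, t).shape)  # (3, 4)
>>>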
======================================================================
ERROR: test_ReLU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_relu(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_ReflectionPad2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_pad(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_ReplicationPad2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_pad(0)
    return {
        '1': 1,
    }
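ReflectionPad2d and ReplicationPad2d both export to a single Pad node; only the mode attribute differs (reflect vs. edge). With numpy the same padding reads:
<<<
import numpy as np

x = np.arange(9, dtype=np.float32).reshape(1, 1, 3, 3)
pads = ((0, 0), (0, 0), (1, 1), (1, 1))       # pad H and W by 1 on each side
print(np.pad(x, pads, mode='reflect').shape)  # ReflectionPad2d -> (1, 1, 5, 5)
print(np.pad(x, pads, mode='edge').shape)     # ReplicationPad2d -> (1, 1, 5, 5)
>>>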
======================================================================
ERROR: test_SELU_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_selu(0)
    return {
        '1': 1,
    }
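SELU is ELU scaled by fixed constants; the ONNX defaults are approximately alpha = 1.6732632 and gamma = 1.0507010. A numpy sketch of the single n0_selu node:
<<<
import numpy as np

def selu(x, alpha=1.6732632, gamma=1.0507010):
    # ONNX Selu: gamma * (x if x > 0 else alpha * (exp(x) - 1))
    return gamma * np.where(x > 0, x, alpha * (np.exp(x) - 1.0))

print(selu(np.array([-1.0, 1.0], dtype=np.float32)))
>>>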
======================================================================
ERROR: test_Sigmoid_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_sigmoid(0)
return {
'1': 1,
}
======================================================================
ERROR: test_Softmax_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_softmax(0)
return {
'1': 1,
}
======================================================================
ERROR: test_Softmin_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_neg(0)
(2, ) = n1_softmax(1)
return {
'2': 2,
}
======================================================================
ERROR: test_Softplus_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Softplus' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
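test_Softplus_cpu fails differently: code generation succeeds, but the python runtime has no kernel registered for Softplus, as the operator list above shows. The computation the missing kernel would perform is Softplus(x) = log(1 + exp(x)), a numpy one-liner. The sketch below is standalone and deliberately not wired into mlprodict's OpRun machinery; it only shows the math the kernel would need.
<<<
import numpy

def softplus(x):
    # logaddexp(0, x) == log(exp(0) + exp(x)) == log(1 + exp(x)),
    # evaluated in a numerically stable way for large |x|.
    return numpy.logaddexp(0, x)

print(softplus(numpy.array([-2.0, 0.0, 2.0], dtype=numpy.float32)))
>>>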
======================================================================
ERROR: test_Softsign_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_abs(0)
(2, ) = n1_constant_12()
(3, ) = n2_add(1, 2)
(4, ) = n3_div(0, 3)
return {
'4': 4,
}
======================================================================
ERROR: test_Tanh_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_tanh(0)
return {
'1': 1,
}
======================================================================
ERROR: test_ZeroPad2d_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_pad(0)
return {
'1': 1,
}
======================================================================
ERROR: test_log_softmax_dim3_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_logsoftmax(0)
return {
'1': 1,
}
======================================================================
ERROR: test_log_softmax_lastdim_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_logsoftmax(0)
return {
'1': 1,
}
======================================================================
ERROR: test_softmax_functional_dim3_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_softmax(0)
return {
'1': 1,
}
======================================================================
ERROR: test_softmax_lastdim_cpu (__main__.OnnxBackendPyTorchConvertedModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_softmax(0)
return {
'1': 1,
}
======================================================================
ERROR: test_operator_add_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
1 = dict_inputs['1']
(2, ) = n0_add(0, 1)
return {
'2': 2,
}
======================================================================
ERROR: test_operator_add_size1_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
1 = dict_inputs['1']
(2, ) = n0_add(0, 1)
return {
'2': 2,
}
======================================================================
ERROR: test_operator_add_size1_right_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
1 = dict_inputs['1']
(2, ) = n0_add(0, 1)
return {
'2': 2,
}
======================================================================
ERROR: test_operator_add_size1_singleton_broadcast_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
1 = dict_inputs['1']
(2, ) = n0_add(0, 1)
return {
'2': 2,
}
======================================================================
ERROR: test_operator_addconstant_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_constant_12()
(2, ) = n1_add(0, 1)
return {
'2': 2,
}
======================================================================
ERROR: test_operator_addmm_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
1 = dict_inputs['1']
2 = dict_inputs['2']
(3, ) = n0_gemm(0, 1, 2)
(4, ) = n1_gemm(0, 1, 3)
return {
'4': 4,
}
======================================================================
ERROR: test_operator_basic_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
1 = dict_inputs['1']
(2, ) = n0_add(0, 1)
(3, ) = n1_mul(0, 2)
(4, ) = n2_tanh(3)
(5, ) = n3_sigmoid(4)
(6, ) = n4_neg(5)
return {
'6': 6,
}
======================================================================
ERROR: test_operator_chunk_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, 2, ) = n0_split_2(0)
return {
'1': 1,
'2': 2,
}
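test_operator_chunk_cpu shows that renaming the inputs alone would not be enough: the unpacking targets in `(1, 2, ) = n0_split_2(0)` are literals too, so any name-mangling fix must also cover intermediate results. A one-line check using only the compile builtin:
<<<
# Every element of a tuple assignment must be a valid target,
# so the generated unpacking fails the same way as the inputs.
try:
    compile("(1, 2, ) = n0_split_2(x)", "<string>", "exec")
except SyntaxError as e:
    print(e.msg)  # cannot assign to literal
>>>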
======================================================================
ERROR: test_operator_clip_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
(1, ) = n0_clip_6(0)
return {
'1': 1,
}
======================================================================
ERROR: test_operator_concat2_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# inputs
0 = dict_inputs['0']
1 = dict_inputs['1']
(2, ) = n0_concat(0, 1)
return {
'2': 2,
}
======================================================================
ERROR: test_operator_conv_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# inputs
0 = dict_inputs['0']
(2, ) = n0_conv(0, 1)
return {
'2': 2,
}
======================================================================
ERROR: test_operator_convtranspose_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
if yield_ops is not None:
raise NotImplementedError('yields_ops should be None.')
# init: 1 (1)
# inputs
0 = dict_inputs['0']
(2, ) = n0_convtranspose(0, 1)
return {
'2': 2,
}
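test_operator_conv_cpu and test_operator_convtranspose_cpu report the error on line 6 instead of line 5 because the extra `# init: 1 (1)` comment line for the weight initializer pushes the input assignment one line down; the root cause is identical. A reconstruction of the generated body, mirroring the log rather than mlprodict internals, confirms the reported line number:
<<<
# The initializer comment occupies line 4, '# inputs' line 5,
# so the bad assignment lands on line 6 as in the traceback.
body = (
    "def compiled_run(dict_inputs, yield_ops=None):\n"
    "    if yield_ops is not None:\n"
    "        raise NotImplementedError('yields_ops should be None.')\n"
    "    # init: 1 (1)\n"
    "    # inputs\n"
    "    0 = dict_inputs['0']\n"
)
try:
    compile(body, "<string>", "exec")
except SyntaxError as e:
    print(e.lineno, e.msg)  # 6 cannot assign to literal
>>>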
======================================================================
ERROR: test_operator_exp_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_exp(0)
    return {
        '1': 1,
    }
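The issue is easy to reproduce outside the backend test runner. The sketch below builds a one-node model with an input named '0', mirroring the exported test model above, and runs it with the interpreted python runtime, which stores intermediate values in a dictionary and is therefore expected not to depend on identifier validity; this is a hypothetical check, not part of the test suite:
<<<
import numpy
from onnx import TensorProto, helper
from mlprodict.onnxrt import OnnxInference

# A one-node graph whose input is named '0', as in the
# PyTorch-exported test models.
node = helper.make_node('Exp', ['0'], ['1'])
graph = helper.make_graph(
    [node], 'repro',
    [helper.make_tensor_value_info('0', TensorProto.FLOAT, [2])],
    [helper.make_tensor_value_info('1', TensorProto.FLOAT, [2])])
model = helper.make_model(graph)

# runtime='python_compiled' raises the SyntaxError shown above;
# the interpreted runtime is the natural point of comparison.
oinf = OnnxInference(model, runtime='python')
print(oinf.run({'0': numpy.array([0.5, 1.5], dtype=numpy.float32)}))
>>>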
======================================================================
ERROR: test_operator_flatten_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_flatten(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_operator_index_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_slice_1(0)
    (2, ) = n1_squeeze_1(1)
    return {
        '2': 2,
    }
======================================================================
ERROR: test_operator_max_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    1 = dict_inputs['1']
    (2, ) = n0_max(0, 1)
    return {
        '2': 2,
    }
======================================================================
ERROR: test_operator_maxpool_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_maxpool(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_operator_min_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    1 = dict_inputs['1']
    (2, ) = n0_min(0, 1)
    return {
        '2': 2,
    }
======================================================================
ERROR: test_operator_mm_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    1 = dict_inputs['1']
    (2, ) = n0_constant_12()
    (3, ) = n1_gemm(0, 1, 2)
    return {
        '3': 3,
    }
======================================================================
ERROR: test_operator_non_float_params_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # init: 1 (1)
    # inputs
    0 = dict_inputs['0']
    (2, ) = n0_add(0, 1)
    (3, ) = n1_mul(0, 2)
    return {
        '3': 3,
    }
======================================================================
ERROR: test_operator_pad_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_pad(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_operator_params_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 6
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # init: 1 (1)
    # inputs
    0 = dict_inputs['0']
    (2, ) = n0_add(0, 1)
    (3, ) = n1_mul(0, 2)
    (4, ) = n2_tanh(3)
    (5, ) = n3_sigmoid(4)
    (6, ) = n4_neg(5)
    return {
        '6': 6,
    }
======================================================================
ERROR: test_operator_permute2_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_transpose(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_operator_pow_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    1 = dict_inputs['1']
    (2, ) = n0_pow(0, 1)
    return {
        '2': 2,
    }
======================================================================
ERROR: test_operator_reduced_mean_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_reducemean(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_operator_reduced_mean_keepdim_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_reducemean(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_operator_reduced_sum_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_reducesum_1(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_operator_reduced_sum_keepdim_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_reducesum_1(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_operator_repeat_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Tile' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
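This error (and test_operator_repeat_dim_overflow_cpu just below) is of a different kind: code generation succeeds, but the Python runtime has no kernel for Tile. The operator itself is straightforward; its semantics coincide with numpy.tile when repeats holds one entry per input axis. A minimal reference sketch of the operator's definition, not of the runtime's API:
<<<
import numpy

def tile_reference(data, repeats):
    # ONNX Tile repeats the tensor along every axis; numpy.tile
    # implements the same semantics when len(repeats) == data.ndim.
    return numpy.tile(data, tuple(int(r) for r in repeats))

x = numpy.arange(6).reshape(2, 3)
r = numpy.array([2, 1], dtype=numpy.int64)
print(tile_reference(x, r).shape)  # (4, 3)
>>>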
======================================================================
ERROR: test_operator_repeat_dim_overflow_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Tile' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
======================================================================
ERROR: test_operator_selu_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_selu(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_operator_sqrt_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_sqrt(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_operator_symbolic_override_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'InstanceNormalization' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
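InstanceNormalization is another missing kernel. Following the ONNX specification, it computes scale * (x - mean) / sqrt(var + epsilon) + bias, with the mean and variance taken per instance and per channel over the spatial axes. A minimal numpy reference sketch, not the runtime's implementation:
<<<
import numpy

def instance_norm_reference(x, scale, bias, epsilon=1e-5):
    # x: (N, C, d1, ..., dk); statistics are computed per instance
    # and per channel over the spatial axes.
    axes = tuple(range(2, x.ndim))
    mean = x.mean(axis=axes, keepdims=True)
    var = x.var(axis=axes, keepdims=True)
    shape = (1, -1) + (1,) * (x.ndim - 2)
    return (scale.reshape(shape) * (x - mean)
            / numpy.sqrt(var + epsilon) + bias.reshape(shape))

x = numpy.random.randn(2, 3, 4, 4).astype(numpy.float32)
print(instance_norm_reference(
    x, numpy.ones(3, numpy.float32),
    numpy.zeros(3, numpy.float32)).shape)  # (2, 3, 4, 4)
>>>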
======================================================================
ERROR: test_operator_symbolic_override_nested_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    1 = dict_inputs['1']
    2 = dict_inputs['2']
    (3, ) = n0_sum(0, 1, 2)
    (4, ) = n1_neg(0)
    (5, ) = n2_neg(1)
    return {
        '3': 3,
        '4': 4,
        '5': 5,
    }
======================================================================
ERROR: test_operator_view_cpu (__main__.OnnxBackendPyTorchOperatorModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1791, in _build_compile_run
obj = compile(final_code, "<string>", 'exec')
File "<string>", line 5
0 = dict_inputs['0']
^
SyntaxError: cannot assign to literal
The above exception was the direct cause of the following exception:
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 294, in _init
_, fct, code = self._build_compile_run('debug' in self.runtime)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 1793, in _build_compile_run
raise SyntaxError(
File "<string>", line None
SyntaxError: Unable to compile
#####
def compiled_run(dict_inputs, yield_ops=None):
    if yield_ops is not None:
        raise NotImplementedError('yields_ops should be None.')
    # inputs
    0 = dict_inputs['0']
    (1, ) = n0_flatten(0)
    return {
        '1': 1,
    }
======================================================================
ERROR: test_bvlc_alexnet_cpu (__main__.OnnxBackendRealModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'LRN' from domain '' has no runtime yet. Available list:
- Abs Acos Acosh Add And ArgMax ArgMin ArrayFeatureExtractor Asin Asinh
Atan Atanh AveragePool BatchNormalization Bernoulli Binarizer BitShift
BroadcastGradientArgs CDist Cast CastLike CategoryMapper Ceil Celu
Clip ComplexAbs Compress Concat ConcatFromSequence Constant
ConstantOfShape Conv ConvTranspose Cos Cosh CumSum DEBUG
DequantizeLinear Det DictVectorizer Div Dropout Einsum Elu Equal Erf
Exp Expand EyeLike FFT FFT2D FeatureVectorizer Flatten Floor
FusedMatMul Gather GatherElements Gemm GlobalAveragePool Greater
GreaterOrEqual HardSigmoid Hardmax Identity If Imputer IsInf IsNaN
LabelEncoder LeakyRelu Less LessOrEqual LinearClassifier
LinearRegressor Log LogSoftmax Loop LpNormalization MatMul Max MaxPool
Mean Min Mod Mul Neg NegativeLogLikelihoodLoss Normalizer Not
OneHotEncoder OpRun Or PRelu Pad Pow QLinearConv QuantizeLinear RFFT
RNN RandomNormal RandomNormalLike RandomUniform RandomUniformLike
Range Reciprocal ReduceL1 ReduceL2 ReduceLogSum ReduceLogSumExp
ReduceMax ReduceMean ReduceMin ReduceProd ReduceSum ReduceSumSquare
Relu Reshape Round SVMClassifier SVMClassifierDouble SVMRegressor
SVMRegressorDouble Scaler Scan ScatterElements Selu SequenceAt
SequenceConstruct SequenceInsert Shape Sigmoid Sign Sin Sinh Size
Slice Softmax SoftmaxCrossEntropyLoss SoftmaxGrad Solve Split Sqrt
Squeeze StringNormalizer Sub Sum Tan Tanh TfIdfVectorizer Tokenizer
TopK Transpose TreeEnsembleClassifier TreeEnsembleClassifierDouble
TreeEnsembleRegressor TreeEnsembleRegressorDouble Trilu Unsqueeze
Where Xor YieldOp ZipMap
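'LRN' is one of the operators this runtime has not implemented. The operator itself is straightforward to express with numpy; a minimal sketch of the ONNX LRN definition, offered as an illustration rather than the implementation a runtime would register:

<<<
import numpy as np

def lrn(x, alpha=0.0001, beta=0.75, bias=1.0, size=1):
    # Minimal numpy sketch of ONNX LRN: normalize each channel by a
    # sum of squares over a window of neighbouring channels.
    c = x.shape[1]
    square_sum = np.zeros_like(x)
    half_lo = (size - 1) // 2   # floor((size - 1) / 2)
    half_hi = size // 2         # ceil((size - 1) / 2)
    for i in range(c):
        lo = max(0, i - half_lo)
        hi = min(c, i + half_hi + 1)
        square_sum[:, i] = (x[:, lo:hi] ** 2).sum(axis=1)
    return x / (bias + alpha / size * square_sum) ** beta

x = np.random.randn(1, 5, 3, 3).astype(np.float32)
print(lrn(x, size=3).shape)  # (1, 5, 3, 3)
>>>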
======================================================================
ERROR: test_gradient_of_add_and_mul_cpu (__main__.OnnxBackendSimpleModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Gradient' from domain 'ai.onnx.preview.training' has no runtime yet. Available list:
- (same available operator list as above)
======================================================================
ERROR: test_gradient_of_add_cpu (__main__.OnnxBackendSimpleModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Gradient' from domain 'ai.onnx.preview.training' has no runtime yet. Available list:
- (same available operator list as above)
======================================================================
ERROR: test_sequence_model1_cpu (__main__.OnnxBackendSimpleModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'SequenceEmpty' from domain '' has no runtime yet. Available list:
- (same available operator list as above)
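SequenceEmpty, like SequenceErase and SplitToSequence in the entries that follow, operates on ONNX sequences, which map naturally onto Python lists of arrays. A minimal sketch of the three missing ops, assuming the default attribute values from the ONNX specification:

<<<
import numpy as np

def sequence_empty():
    # An ONNX sequence is an ordered collection of tensors.
    return []

def sequence_erase(seq, position=None):
    # Default position is -1, i.e. remove the last tensor.
    pos = -1 if position is None else int(position)
    return [t for i, t in enumerate(seq) if i != pos % len(seq)]

def split_to_sequence(x, axis=0, keepdims=1):
    # Without a 'split' input, split into slices of size 1 along 'axis';
    # keepdims=0 squeezes the split axis away.
    parts = np.split(x, x.shape[axis], axis=axis)
    if not keepdims:
        parts = [np.squeeze(p, axis=axis) for p in parts]
    return parts

seq = split_to_sequence(np.arange(6).reshape(3, 2))
print(len(sequence_erase(seq, 1)))  # 2
>>>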
======================================================================
ERROR: test_sequence_model2_cpu (__main__.OnnxBackendSimpleModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'SequenceErase' from domain '' has no runtime yet. Available list:
- (same available operator list as above)
======================================================================
ERROR: test_sequence_model3_cpu (__main__.OnnxBackendSimpleModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'SequenceErase' from domain '' has no runtime yet. Available list:
- (same available operator list as above)
======================================================================
ERROR: test_sequence_model6_cpu (__main__.OnnxBackendSimpleModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'SplitToSequence' from domain '' has no runtime yet. Available list:
- (same available operator list as above)
======================================================================
ERROR: test_sequence_model7_cpu (__main__.OnnxBackendSimpleModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'SplitToSequence' from domain '' has no runtime yet. Available list:
- (same available operator list as above)
======================================================================
ERROR: test_sequence_model8_cpu (__main__.OnnxBackendSimpleModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'SplitToSequence' from domain '' has no runtime yet. Available list:
- (same available operator list as above)
======================================================================
ERROR: test_shrink_cpu (__main__.OnnxBackendSimpleModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 295, in run
prepared_model = self.backend.prepare(model, device)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 221, in prepare
return cls.prepare(binm, device, **kwargs)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 202, in prepare
inf = cls.create_inference_session(model)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/backend.py", line 256, in create_inference_session
return OnnxInference(model, runtime='python_compiled')
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 142, in __init__
self._init(existing_functions)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference.py", line 260, in _init
node.setup_runtime(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 260, in setup_runtime
raise e
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/onnx_inference_node.py", line 243, in setup_runtime
self.ops_ = load_op(self.onnx_node, desc=self.desc,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops.py", line 36, in load_op
return lo(onnx_node, desc=desc, options=options)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_doc/sphinxdoc/source/mlprodict/onnxrt/ops_cpu/__init__.py", line 85, in load_op
raise MissingOperatorError( # pragma no cover
mlprodict.onnxrt.excs.MissingOperatorError: Operator 'Shrink' from domain '' has no runtime yet. Available list:
- (same available operator list as above)
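Shrink is the last missing operator in this run and the simplest to supply; a minimal numpy sketch of its definition (y = x + bias below -lambd, x - bias above lambd, 0 in between):

<<<
import numpy as np

def shrink(x, bias=0.0, lambd=0.5):
    # Minimal numpy sketch of ONNX Shrink: values inside [-lambd, lambd]
    # are zeroed, values outside are pulled towards zero by 'bias'.
    return np.where(x < -lambd, x + bias,
                    np.where(x > lambd, x - bias, 0)).astype(x.dtype)

x = np.array([-2.0, -0.25, 0.0, 0.25, 2.0], dtype=np.float32)
print(shrink(x, bias=1.0, lambd=0.5))  # [-1.  0.  0.  0.  1.]
>>>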
======================================================================
FAIL: test_bernoulli_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 4 / 10 (40%)
Max absolute difference: 1.
Max relative difference: 1.
x: array([0., 1., 0., 1., 1., 1., 1., 1., 1., 1.])
y: array([0., 1., 1., 0., 0., 1., 0., 1., 1., 1.])
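The four Bernoulli failures (this entry and the three below) are expected for a random operator: an elementwise comparison against the reference outputs can only succeed if both sides use the same generator and seed. A distribution-level check is the realistic way to validate such an op; a small sketch of that idea, not what the test runner actually does:

<<<
import numpy as np

rng = np.random.default_rng(0)
p = np.full(10_000, 0.3)
draws = (rng.random(p.shape) < p).astype(np.float32)  # Bernoulli(p)

# Elementwise equality with another generator's draws is meaningless;
# compare the empirical mean to p instead.
assert abs(draws.mean() - 0.3) < 0.02
>>>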
======================================================================
FAIL: test_bernoulli_double_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 3 / 10 (30%)
Max absolute difference: 1.
Max relative difference: 1.
x: array([0., 1., 0., 0., 0., 1., 1., 1., 1., 0.])
y: array([0., 1., 1., 0., 0., 1., 0., 1., 1., 1.])
======================================================================
FAIL: test_bernoulli_double_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 7 / 10 (70%)
Max absolute difference: 1.
Max relative difference: 1.
x: array([0., 1., 0., 1., 1., 0., 1., 0., 0., 1.])
y: array([0., 1., 1., 0., 0., 1., 0., 1., 1., 1.])
======================================================================
FAIL: test_bernoulli_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 7 / 10 (70%)
Max absolute difference: 1.
Max relative difference: 1.
x: array([1., 0., 0., 0., 0., 0., 1., 0., 0., 1.])
y: array([0., 1., 1., 0., 0., 1., 0., 1., 1., 1.])
======================================================================
FAIL: test_cast_FLOAT_to_STRING_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 187, in assert_similar_outputs
np.testing.assert_equal(outputs[i].dtype, ref_outputs[i].dtype)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 425, in assert_equal
raise AssertionError(msg)
AssertionError:
Items are not equal:
ACTUAL: dtype('<U32')
DESIRED: dtype('O')
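Here the values match but the dtypes do not: numpy's `astype(str)` produces a fixed-width unicode dtype such as `<U32`, while the ONNX reference data stores strings with dtype `object`. Casting once more to `object` reconciles the two:

<<<
import numpy as np

x = np.array([0.5, 1.5], dtype=np.float32)
print(x.astype(str).dtype)                 # <U32, fixed-width unicode
print(x.astype(str).astype(object).dtype)  # object, as the reference expects
>>>

The CastLike failure below shows a second symptom: the floats are stringified at float64 precision ('0.9767611026763916') where the reference expects the float32 form ('0.9767611').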
======================================================================
FAIL: test_castlike_FLOAT_to_STRING_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 189, in assert_similar_outputs
np.testing.assert_array_equal(outputs[i], ref_outputs[i])
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 934, in assert_array_equal
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not equal
Mismatched elements: 12 / 12 (100%)
x: array([[0.9767611026763916, 0.6048455238342285, 0.7392635941505432,
0.03918779268860817],
[0.28280696272850037, 0.12019655853509903, 0.296140193939209,...
y: array([['0.9767611', '0.6048455', '0.7392636', '0.039187793'],
['0.28280696', '0.12019656', '0.2961402', '0.11872772'],
['0.31798318', '0.41426298', '0.064147495', '0.6924721']],
dtype=object)
======================================================================
FAIL: test_castlike_FLOAT_to_STRING_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 187, in assert_similar_outputs
np.testing.assert_equal(outputs[i].dtype, ref_outputs[i].dtype)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 425, in assert_equal
raise AssertionError(msg)
AssertionError:
Items are not equal:
ACTUAL: dtype('<U32')
DESIRED: dtype('O')
======================================================================
FAIL: test_convtranspose_autopad_same_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
(shapes (1, 2, 7, 7), (1, 2, 6, 6) mismatch)
x: array([[[[ 0., 0., 1., 1., 3., 2., 2.],
[ 0., 0., 1., 1., 3., 2., 2.],
[ 3., 3., 8., 5., 12., 7., 7.],...
y: array([[[[ 0., 0., 1., 1., 3., 2.],
[ 0., 0., 1., 1., 3., 2.],
[ 3., 3., 8., 5., 12., 7.],...
======================================================================
FAIL: test_convtranspose_output_shape_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
(shapes (1, 2, 9, 7), (1, 2, 10, 8) mismatch)
x: array([[[[ 0., 0., 1., 1., 3., 2., 2.],
[ 0., 0., 1., 1., 3., 2., 2.],
[ 0., 0., 1., 1., 3., 2., 2.],...
y: array([[[[ 0., 0., 1., 1., 3., 2., 2., 0.],
[ 0., 0., 1., 1., 3., 2., 2., 0.],
[ 0., 0., 1., 1., 3., 2., 2., 0.],...
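Both ConvTranspose failures are pure output-shape mismatches. Assuming the standard ONNX size formula, the default output is `stride * (input - 1) + kernel - 2 * pad`; with `auto_pad=SAME_UPPER` it should instead be `input * stride`, and an explicit `output_shape` attribute should override the formula entirely. The arithmetic reproduces both mismatches above:

<<<
def convtranspose_out(in_size, kernel, stride, pad=0):
    # Default ONNX ConvTranspose output size (output_padding = 0).
    return stride * (in_size - 1) + kernel - 2 * pad

# autopad_same: a 3x3 input with a 3x3 kernel and stride 2.
print(convtranspose_out(3, 3, 2))  # 7 -> the 7x7 the runtime produced
print(3 * 2)                       # 6 -> the 6x6 SAME_UPPER expects

# output_shape: strides (3, 2) give 9x7 by default, but the test
# requests (10, 8); the missing unit per axis should become extra
# end padding, which the runtime appears to ignore.
print(convtranspose_out(3, 3, 3), convtranspose_out(3, 3, 2))  # 9 7
>>>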
======================================================================
FAIL: test_dynamicquantizelinear_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 2 / 6 (33.3%)
Max absolute difference: 255
Max relative difference: 9.808
x: array([153, 255, 0, 25, 221, 178], dtype=uint8)
y: array([153, 255, 0, 26, 221, 179], dtype=uint8)
======================================================================
FAIL: test_dynamicquantizelinear_max_adjusted_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 1 / 6 (16.7%)
Max absolute difference: 255
Max relative difference: 2.656
x: array([191, 121, 172, 95, 42, 0], dtype=uint8)
y: array([191, 121, 172, 96, 42, 0], dtype=uint8)
======================================================================
FAIL: test_dynamicquantizelinear_min_adjusted_expanded_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 7 / 12 (58.3%)
Max absolute difference: 255
Max relative difference: 3.984
x: array([[ 63, 133, 82, 159],
[212, 255, 95, 165],
[248, 255, 191, 149]], dtype=uint8)
y: array([[ 64, 134, 83, 159],
[213, 255, 96, 166],
[249, 255, 191, 149]], dtype=uint8)
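The three dynamicquantizelinear_* failures all show the runtime one quantization step below the reference on a few elements. One plausible cause, offered as an assumption rather than a diagnosis from this log, is the rounding of x / scale: ONNX specifies round-half-to-even, and both round-half-up and plain truncation can land one unit away on the affected values:

<<<
import numpy as np

x = np.array([2.5, 25.5, 95.5])
print(np.round(x))         # [ 2. 26. 96.]  round-half-to-even, per ONNX
print(np.floor(x + 0.5))   # [ 3. 26. 96.]  round-half-up
print(x.astype(np.int64))  # [ 2 25 95]     truncation, one unit low
>>>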
======================================================================
FAIL: test_eyelike_without_dtype_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 187, in assert_similar_outputs
np.testing.assert_equal(outputs[i].dtype, ref_outputs[i].dtype)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 425, in assert_equal
raise AssertionError(msg)
AssertionError:
Items are not equal:
ACTUAL: dtype('float32')
DESIRED: dtype('int32')
======================================================================
FAIL: test_isinf_negative_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 3 / 6 (50%)
x: array([False, False, True, False, False, True])
y: array([False, False, False, False, True, False])
======================================================================
FAIL: test_isinf_positive_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 3 / 6 (50%)
x: array([False, False, False, False, True, False])
y: array([False, False, True, False, False, True])
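Note: the two IsInf failures are exact mirrors of each other: the run flags positive infinities where negative ones are expected, and vice versa, which strongly suggests the detect_negative and detect_positive attributes are applied swapped. A minimal sketch of the intended semantics (the input literals are reconstructed from the expected column, so treat them as an assumption):
<<<
import numpy as np

def isinf(x, detect_negative=1, detect_positive=1):
    res = np.zeros(x.shape, dtype=bool)
    if detect_positive:
        res |= np.isposinf(x)
    if detect_negative:
        res |= np.isneginf(x)
    return res

x = np.array([-1.2, np.nan, np.inf, 2.8, -np.inf, np.inf])
print(isinf(x, detect_positive=0))  # only the -inf position is True
>>>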
======================================================================
FAIL: test_logsoftmax_default_axis_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 60 / 60 (100%)
Max absolute difference: 1.374
Max relative difference: 0.679
x: array([[[-0.63786 , -2.150428, -0.994332, -0.32463 , -0.607985],
[-1.424634, -1.600497, -1.821713, -2.462304, -2.064944],
[-2.257869, -1.096312, -1.212032, -2.443848, -2.031679],...
y: array([[[-1.488776, -2.852671, -2.27409 , -1.011935, -1.38527 ],
[-1.222501, -1.24969 , -2.048422, -2.09656 , -1.78918 ],
[-2.184687, -0.874457, -1.567693, -2.207056, -1.884868],...
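Note: a 100 % mismatch with moderate differences is typical of normalizing over the wrong axis. LogSoftmax-13 changed the default axis from 1 to -1; keeping the old default would reproduce this failure (see also test_softmax_default_axis_cpu below). A numerically stable sketch of the opset-13 behaviour:
<<<
import numpy as np

def logsoftmax(x, axis=-1):
    # opset 13 defaults to the last axis; earlier opsets used axis=1
    m = x.max(axis=axis, keepdims=True)
    return x - m - np.log(np.exp(x - m).sum(axis=axis, keepdims=True))
>>>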
======================================================================
FAIL: test_loop11_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
(shapes (1,), (5, 1) mismatch)
x: array([13.], dtype=float32)
y: array([[-1.],
[ 1.],
[ 4.],...
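Note: the expected output has shape (5, 1), one row per iteration, while the run returned a single value. That is consistent with the Loop scan output being replaced by the last iteration's value instead of the stacked values from all iterations. A minimal sketch, with a hypothetical body signature:
<<<
import numpy as np

def loop(trip_count, state, body):
    scan = []
    for i in range(trip_count):
        state, y = body(np.int64(i), state)
        scan.append(y)
    # scan outputs stack along a new leading axis: if y has shape (1,),
    # five iterations give (5, 1), not just the final y
    return state, np.stack(scan)
>>>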
======================================================================
FAIL: test_maxpool_2d_uint8_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 187, in assert_similar_outputs
np.testing.assert_equal(outputs[i].dtype, ref_outputs[i].dtype)
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 425, in assert_equal
raise AssertionError(msg)
AssertionError:
Items are not equal:
ACTUAL: dtype('float64')
DESIRED: dtype('uint8')
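Note: another dtype failure: the pooled values come back as float64 instead of uint8, which suggests an intermediate promotion to float without a cast back to the input type. A toy 2x2 pool showing the missing cast:
<<<
import numpy as np

def maxpool_2x2(x):
    # toy 2x2, stride-2 max pool; whatever intermediate dtype the
    # kernel uses, the output must keep the input dtype (uint8 here)
    n, c, h, w = x.shape
    r = x.reshape(n, c, h // 2, 2, w // 2, 2).max(axis=(3, 5))
    return r.astype(x.dtype)

x = np.arange(16, dtype=np.uint8).reshape(1, 1, 4, 4)
print(maxpool_2x2(x).dtype)  # uint8
>>>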
======================================================================
FAIL: test_mod_int64_fmod_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 2 / 6 (33.3%)
Max absolute difference: 3
Max relative difference: 3.
x: array([ 0, -2, 5, 0, 2, 3])
y: array([ 0, 1, 5, 0, -1, 3])
======================================================================
FAIL: test_mod_mixed_sign_float16_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 4 / 6 (66.7%)
Max absolute difference: 3.4
Max relative difference: 20.67
x: array([ 1.998, -3.002, 5. , -1.998, 3.002, 3. ], dtype=float16)
y: array([-0.10156, 0.3984 , 5. , 0.10156, -0.3984 , 3. ],
dtype=float16)
======================================================================
FAIL: test_mod_mixed_sign_float32_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 4 / 6 (66.7%)
Max absolute difference: 3.4
Max relative difference: 21.
x: array([ 2., -3., 5., -2., 3., 3.], dtype=float32)
y: array([-0.1, 0.4, 5. , 0.1, -0.4, 3. ], dtype=float32)
======================================================================
FAIL: test_mod_mixed_sign_float64_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 4 / 6 (66.7%)
Max absolute difference: 3.4
Max relative difference: 21.
x: array([ 2., -3., 5., -2., 3., 3.])
y: array([-0.1, 0.4, 5. , 0.1, -0.4, 3. ])
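Note: the four Mod failures share one cause. With fmod=1 (mandatory for floating-point inputs), ONNX Mod follows C fmod, whose result takes the sign of the dividend; numpy's default % / np.mod takes the sign of the divisor. The two columns above are exactly np.mod versus np.fmod of the same inputs (the literals below are reconstructed from the outputs, so treat them as an assumption):
<<<
import numpy as np
a = np.array([-4.3, 7.2, 5.0, 4.3, -7.2, 8.0], dtype=np.float32)
b = np.array([2.1, -3.4, 8.0, -2.1, 3.4, 5.0], dtype=np.float32)
print(np.mod(a, b))   # sign of the divisor  -> the actual column
print(np.fmod(a, b))  # sign of the dividend -> the expected column
>>>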
======================================================================
FAIL: test_quantizelinear_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 1 / 6 (16.7%)
Max absolute difference: 255
Max relative difference: 1.962
x: array([128, 129, 129, 255, 1, 0], dtype=uint8)
y: array([128, 129, 130, 255, 1, 0], dtype=uint8)
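Note: a single mismatched element sitting on a rounding tie. With the usual test data (x = [0, 2, 3, 1000, -254, -1000], scale 2, zero point 128, reconstructed from the outputs, so an assumption), only 3 / 2 = 1.5 falls exactly between two integers; rounding half to even, as QuantizeLinear specifies, gives 2 and hence 130, while truncating gives 129:
<<<
import numpy as np
x = np.array([0., 2., 3., 1000., -254., -1000.], dtype=np.float32)
scale, zero_point = np.float32(2), 128
# round half to even, then saturate into the uint8 range
q = np.clip(np.rint(x / scale) + zero_point, 0, 255).astype(np.uint8)
print(q)  # [128 129 130 255   1   0]
>>>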
======================================================================
FAIL: test_scatter_elements_with_duplicate_indices_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 1 / 5 (20%)
Max absolute difference: 3.1
Max relative difference: 0.596
x: array([[1. , 2.1, 3. , 4. , 5. ]], dtype=float32)
y: array([[1. , 5.2, 3. , 4. , 5. ]], dtype=float32)
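Note: with reduction="add", duplicate indices must accumulate: 2 + 1.1 + 2.1 = 5.2 in the expected column, while 2.1 in the actual column is a plain last-write-wins scatter, i.e. the reduction attribute appears to be ignored. A sketch with the (reconstructed) test data:
<<<
import numpy as np
data = np.array([[1., 2., 3., 4., 5.]], dtype=np.float32)
indices = np.array([[1, 1]])
updates = np.array([[1.1, 2.1]], dtype=np.float32)
out = data.copy()
# np.add.at accumulates on repeated indices, matching reduction="add"
np.add.at(out[0], indices[0], updates[0])
print(out)  # [[1.  5.2 3.  4.  5. ]]
>>>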
======================================================================
FAIL: test_selu_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 28 / 60 (46.7%)
Max absolute difference: 3.689
Max relative difference: 0.667
x: array([[[ 5.292157, 1.200472, 2.936214, 6.722679, 5.602674],
[-1.247332, 2.850265, -0.280919, -0.196141, 1.231796],
[ 0.432131, 4.362821, 2.283113, 0.365025, 1.33159 ],...
y: array([[[ 5.292157, 1.200472, 2.936214, 6.722679, 5.602674],
[-3.741995, 2.850265, -0.842756, -0.588423, 1.231796],
[ 0.432131, 4.362821, 2.283113, 0.365025, 1.33159 ],...
======================================================================
FAIL: test_selu_default_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 28 / 60 (46.7%)
Max absolute difference: 0.078
Max relative difference: 0.048
x: array([[[ 1.853492, 0.420446, 1.028361, 2.354509, 1.962245],
[-1.043557, 0.998259, -0.235026, -0.164098, 0.431416],
[ 0.151347, 1.528007, 0.799623, 0.127844, 0.466368],...
y: array([[[ 1.853492, 0.420446, 1.028361, 2.354509, 1.962245],
[-1.096467, 0.998259, -0.246942, -0.172418, 0.431416],
[ 0.151347, 1.528007, 0.799623, 0.127844, 0.466368],...
======================================================================
FAIL: test_selu_example_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 1 / 3 (33.3%)
Max absolute difference: 2.528
Max relative difference: 0.667
x: array([-1.264241, 0. , 3. ], dtype=float32)
y: array([-3.792723, 0. , 3. ], dtype=float32)
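Note: in all three Selu failures the positive entries agree and only the negative entries differ, by exactly the gamma factor (e.g. -1.264241 x 3 = -3.792723 above; test_selu_example_cpu uses alpha=2, gamma=3). That is consistent with gamma not being applied to the exponential branch. A sketch of the full formula (the input [-1, 0, 1] is reconstructed, so an assumption):
<<<
import numpy as np

def selu(x, alpha=1.6732632423543772, gamma=1.0507009873554805):
    # gamma scales BOTH branches; dropping it on the negative branch
    # reproduces the actual column above
    return np.where(x > 0, gamma * x, gamma * alpha * (np.exp(x) - 1.0))

print(selu(np.array([-1., 0., 1.], dtype=np.float32), alpha=2.0, gamma=3.0))
# -> approximately [-3.792723  0.  3.], the expected column
>>>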
======================================================================
FAIL: test_shape_end_1_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
(shapes (3,), (1,) mismatch)
x: array([3, 4, 5])
y: array([3])
======================================================================
FAIL: test_shape_end_negative_1_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
(shapes (3,), (2,) mismatch)
x: array([3, 4, 5])
y: array([3, 4])
======================================================================
FAIL: test_shape_start_1_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
(shapes (3,), (2,) mismatch)
x: array([3, 4, 5])
y: array([4, 5])
======================================================================
FAIL: test_shape_start_1_end_2_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
(shapes (3,), (1,) mismatch)
x: array([3, 4, 5])
y: array([4])
======================================================================
FAIL: test_shape_start_1_end_negative_1_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
(shapes (3,), (1,) mismatch)
x: array([3, 4, 5])
y: array([4])
======================================================================
FAIL: test_shape_start_negative_1_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
(shapes (3,), (1,) mismatch)
x: array([3, 4, 5])
y: array([5])
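Note: all six Shape failures return the full shape (3, 4, 5); the start and end attributes introduced in Shape-15 are evidently ignored. They slice the shape vector with Python semantics:
<<<
import numpy as np

def shape_op(x, start=0, end=None):
    # Shape-15: the shape vector is sliced like a Python list
    return np.array(x.shape[start:end], dtype=np.int64)

x = np.zeros((3, 4, 5))
print(shape_op(x, end=1))           # [3]
print(shape_op(x, end=-1))          # [3 4]
print(shape_op(x, start=1))         # [4 5]
print(shape_op(x, start=1, end=2))  # [4]
print(shape_op(x, start=-1))        # [5]
>>>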
======================================================================
FAIL: test_softmax_default_axis_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 191, in assert_similar_outputs
np.testing.assert_allclose(
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 1530, in assert_allclose
assert_array_compare(compare, actual, desired, err_msg=str(err_msg),
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 844, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Not equal to tolerance rtol=0.001, atol=1e-07
Mismatched elements: 60 / 60 (100%)
Max absolute difference: 0.359
Max relative difference: 2.949
x: array([[[0.528422, 0.116434, 0.369971, 0.722795, 0.544447],
[0.240596, 0.201796, 0.161748, 0.085238, 0.126825],
[0.104573, 0.334101, 0.297592, 0.086826, 0.131115],...
y: array([[[0.225649, 0.05769 , 0.10289 , 0.363515, 0.250256],
[0.294493, 0.286594, 0.128938, 0.122878, 0.167097],
[0.112513, 0.417088, 0.208526, 0.110024, 0.151849],...
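Note: likely the same root cause as test_logsoftmax_default_axis_cpu above: Softmax-13 changed the default axis from 1 to -1. A numerically stable sketch of the opset-13 behaviour:
<<<
import numpy as np

def softmax(x, axis=-1):
    # opset-13 default axis; normalizing over the pre-13 default of
    # axis=1 perturbs every element, as in the failure above
    e = np.exp(x - x.max(axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)
>>>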
======================================================================
FAIL: test_strnormalizer_export_monday_casesensintive_lower_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 189, in assert_similar_outputs
np.testing.assert_array_equal(outputs[i], ref_outputs[i])
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 934, in assert_array_equal
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not equal
(shapes (4,), (3,) mismatch)
x: array(['', 'tuesday', 'wednesday', 'thursday'], dtype=object)
y: array(['tuesday', 'wednesday', 'thursday'], dtype=object)
======================================================================
FAIL: test_strnormalizer_export_monday_casesensintive_nochangecase_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 189, in assert_similar_outputs
np.testing.assert_array_equal(outputs[i], ref_outputs[i])
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 934, in assert_array_equal
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not equal
(shapes (4,), (3,) mismatch)
x: array(['', 'tuesday', 'wednesday', 'thursday'], dtype=object)
y: array(['tuesday', 'wednesday', 'thursday'], dtype=object)
======================================================================
FAIL: test_strnormalizer_export_monday_casesensintive_upper_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 189, in assert_similar_outputs
np.testing.assert_array_equal(outputs[i], ref_outputs[i])
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 934, in assert_array_equal
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not equal
(shapes (4,), (3,) mismatch)
x: array(['', 'TUESDAY', 'WEDNESDAY', 'THURSDAY'], dtype=object)
y: array(['TUESDAY', 'WEDNESDAY', 'THURSDAY'], dtype=object)
======================================================================
FAIL: test_strnormalizer_export_monday_empty_output_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 189, in assert_similar_outputs
np.testing.assert_array_equal(outputs[i], ref_outputs[i])
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 934, in assert_array_equal
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not equal
(shapes (2,), (1,) mismatch)
x: array(['', ''], dtype=object)
y: array([''], dtype=object)
======================================================================
FAIL: test_strnormalizer_export_monday_insensintive_upper_twodim_cpu (__main__.OnnxBackendNodeModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 189, in assert_similar_outputs
np.testing.assert_array_equal(outputs[i], ref_outputs[i])
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 934, in assert_array_equal
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not equal
(shapes (1, 6), (1, 4) mismatch)
x: array([['MONDAY', 'TUESDAY', 'WEDNESDAY', 'MONDAY', 'TUESDAY',
'WEDNESDAY']], dtype=object)
y: array([['TUESDAY', 'WEDNESDAY', 'TUESDAY', 'WEDNESDAY']], dtype=object)
======================================================================
FAIL: test_strnorm_model_monday_casesensintive_lower_cpu (__main__.OnnxBackendSimpleModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 189, in assert_similar_outputs
np.testing.assert_array_equal(outputs[i], ref_outputs[i])
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 934, in assert_array_equal
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not equal
(shapes (4,), (3,) mismatch)
x: array(['', 'tuesday', 'wednesday', 'thursday'], dtype=object)
y: array(['tuesday', 'wednesday', 'thursday'], dtype=object)
======================================================================
FAIL: test_strnorm_model_monday_casesensintive_nochangecase_cpu (__main__.OnnxBackendSimpleModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 189, in assert_similar_outputs
np.testing.assert_array_equal(outputs[i], ref_outputs[i])
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 934, in assert_array_equal
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not equal
(shapes (4,), (3,) mismatch)
x: array(['', 'tuesday', 'wednesday', 'thursday'], dtype=object)
y: array(['tuesday', 'wednesday', 'thursday'], dtype=object)
======================================================================
FAIL: test_strnorm_model_monday_casesensintive_upper_cpu (__main__.OnnxBackendSimpleModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 189, in assert_similar_outputs
np.testing.assert_array_equal(outputs[i], ref_outputs[i])
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 934, in assert_array_equal
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not equal
(shapes (4,), (3,) mismatch)
x: array(['', 'TUESDAY', 'WEDNESDAY', 'THURSDAY'], dtype=object)
y: array(['TUESDAY', 'WEDNESDAY', 'THURSDAY'], dtype=object)
======================================================================
FAIL: test_strnorm_model_monday_empty_output_cpu (__main__.OnnxBackendSimpleModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 189, in assert_similar_outputs
np.testing.assert_array_equal(outputs[i], ref_outputs[i])
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 934, in assert_array_equal
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not equal
(shapes (2,), (1,) mismatch)
x: array(['MONDAY', 'MONDAY'], dtype=object)
y: array([''], dtype=object)
======================================================================
FAIL: test_strnorm_model_monday_insensintive_upper_twodim_cpu (__main__.OnnxBackendSimpleModelTest)
----------------------------------------------------------------------
Traceback (most recent call last):
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 265, in device_test_func
return test_func(*args, device=device, **kwargs)
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 321, in run
self.assert_similar_outputs(ref_outputs, outputs,
File "/usr/local/lib/python3.9/site-packages/onnx/backend/test/runner/__init__.py", line 189, in assert_similar_outputs
np.testing.assert_array_equal(outputs[i], ref_outputs[i])
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 934, in assert_array_equal
assert_array_compare(operator.__eq__, x, y, err_msg=err_msg,
File "/var/lib/jenkins/workspace/mlprodict/mlprodict_UT_39_std/_venv/lib/python3.9/site-packages/numpy/testing/_private/utils.py", line 763, in assert_array_compare
raise AssertionError(msg)
AssertionError:
Arrays are not equal
(shapes (1, 6), (1, 4) mismatch)
x: array([['MONDAY', 'TUESDAY', 'WEDNESDAY', 'MONDAY', 'TUESDAY',
'WEDNESDAY']], dtype=object)
y: array([['TUESDAY', 'WEDNESDAY', 'TUESDAY', 'WEDNESDAY']], dtype=object)
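Note: the ten StringNormalizer failures (both the node tests and the OnnxBackendSimpleModelTest variants) show the same symptom: the stopword "monday" is not removed; the run keeps an empty-string placeholder (or the original words), so the output retains its input length, while the operator is specified to drop filtered words and shrink the tensor, emitting a single empty string only when everything is removed. A minimal sketch of that behaviour:
<<<
import numpy as np

def string_normalizer(words, stopwords=(), case_change_action="NONE",
                      is_case_sensitive=True):
    key = (lambda s: s) if is_case_sensitive else str.lower
    stop = {key(w) for w in stopwords}
    out = [w for w in words if key(w) not in stop]  # drop, don't blank
    if case_change_action == "LOWER":
        out = [w.lower() for w in out]
    elif case_change_action == "UPPER":
        out = [w.upper() for w in out]
    if not out:
        out = [""]  # all words removed -> a single empty string
    return np.array(out, dtype=object)

print(string_normalizer(
    ["monday", "tuesday", "wednesday", "thursday"],
    stopwords=["monday"], case_change_action="UPPER"))
# ['TUESDAY' 'WEDNESDAY' 'THURSDAY']
>>>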
----------------------------------------------------------------------
Ran 2026 tests in 28.789s
FAILED (failures=44, errors=303, skipped=1021)