__init__ |
module mlprodict Python runtime for ONNX and other tools to help with converting models and investigating issues with ONNX models. source on GitHub
__init__ |
module mlprodict.asv_benchmark Shortcuts to asv_benchmark. source on GitHub |
__init__ |
module mlprodict.cli Shortcut to cli. source on GitHub |
__init__ |
module mlprodict.grammar.cc Shortcuts to cc. source on GitHub |
__init__ |
module mlprodict.grammar.grammar_sklearn Shortcuts to grammar_sklearn. source on GitHub |
__init__ |
module mlprodict.grammar.grammar_sklearn.grammar Shortcuts to grammar. source on GitHub |
__init__ |
module mlprodict.npy Shortcut to npy. .. versionadded:: 0.6 source on GitHub |
__init__ |
module mlprodict.npy._cache Cache documentation for OnnxOps. .. versionadded:: 0.9 source on GitHub |
__init__ |
module mlprodict.onnx_conv Shortcut to onnx_conv. Importing this file means importing sklearn-onnx. source on GitHub |
__init__ |
module mlprodict.onnx_conv.onnx_ops Shortcuts to onnx_ops. source on GitHub |
__init__ |
module mlprodict.onnx_conv.operator_converters Shortcut to operator_converters. source on GitHub |
__init__ |
module mlprodict.onnx_conv.scorers Shortcuts to scorers. source on GitHub |
__init__ |
module mlprodict.onnx_conv.sklconv Shortcuts to sklconv. source on GitHub |
__init__ |
module mlprodict.onnx_tools Shortcut to onnx_tools. source on GitHub |
__init__ |
module mlprodict.onnx_tools.onnx_grammar Shortcut to onnx_grammar. source on GitHub |
__init__ |
module mlprodict.onnx_tools.optim Shortcuts to onnx_tools.optim. source on GitHub |
__init__ |
module mlprodict.onnxrt Shortcut to onnxrt. source on GitHub |
__init__ |
module mlprodict.onnxrt.doc Helper for documentation. source on GitHub |
__init__ |
module mlprodict.onnxrt.ops_cpu Shortcut to ops_cpu. source on GitHub |
__init__ |
module mlprodict.onnxrt.ops_empty Shortcut to ops_empty. source on GitHub
__init__ |
module mlprodict.onnxrt.ops_onnxruntime Shortcut to ops_onnxruntime. source on GitHub
__init__ |
module mlprodict.onnxrt.ops_shape Shortcut to ops_shape. source on GitHub |
__init__ |
module mlprodict.onnxrt.ops_whole Shortcut to ops_whole. source on GitHub |
__init__ |
module mlprodict.onnxrt.validate Functions to validate converted models and runtime. source on GitHub |
__init__ |
module mlprodict.onnxrt.validate.data Datasets to test models. source on GitHub
__init__ |
module mlprodict.plotting Shortcuts to plotting. source on GitHub |
__init__ |
module mlprodict.sklapi Shortcut to sklapi. Importing this file imports sklearn-onnx as well. source on GitHub |
__init__ |
module mlprodict.testing Shortcut to testing. source on GitHub |
__init__ |
module mlprodict.testing.einsum Shortcut to testing.einsum. source on GitHub |
__init__ |
module mlprodict.testing.experimental_c_impl Shortcut to testing.experimental_c. source on GitHub |
__init__ |
module mlprodict.testing.test_utils Inspired from sklearn-onnx, handles two backends. source on GitHub |
__init__ |
module mlprodict.tools Shortcuts to tools. source on GitHub |
__main__ |
module mlprodict.__main__ Implements the command line python -m mlprodict <command> <args>. source on GitHub
_create_asv_helper |
module mlprodict.asv_benchmark._create_asv_helper Functions to create a benchmark based on asv for many regressors and classifiers. source on GitHub
_element_unary |
module mlprodict.onnxrt.ops_shape._element_unary Computes shape inference for element-wise operators with one input. source on GitHub
_element_wise |
module mlprodict.onnxrt.ops_shape._element_wise Computes shape inference for element-wise operators. source on GitHub
_main_onnx_optim |
module mlprodict.onnx_tools.optim._main_onnx_optim Calls all possible ONNX optimisations. source on GitHub |
_new_ops |
module mlprodict.onnxrt.ops_cpu._new_ops Defines new operators. source on GitHub |
_onnx_optimisation_common |
module mlprodict.onnx_tools.optim._onnx_optimisation_common Common functions to reduce the number of nodes of an ONNX graph. source on GitHub
_op |
module mlprodict.onnxrt.ops_cpu._op Shortcut to ops_cpu. source on GitHub |
_op |
module mlprodict.onnxrt.ops_empty._op Shortcut to ops_onnxruntime. source on GitHub |
_op |
module mlprodict.onnxrt.ops_onnxruntime._op Shortcut to ops_onnxruntime. source on GitHub |
_op_classifier_string |
module mlprodict.onnxrt.ops_cpu._op_classifier_string Common class for classifiers supporting strings. source on GitHub |
_op_helper |
module mlprodict.onnxrt.ops_cpu._op_helper Runtime operator. source on GitHub |
_op_list |
module mlprodict.onnxrt.ops_cpu._op_list Imports runtime operators. source on GitHub |
_op_numpy_helper |
module mlprodict.onnxrt.ops_cpu._op_numpy_helper Redundant numpy functions. source on GitHub
_op_onnx_numpy.cpython-39-x86_64-linux-gnu |
module mlprodict.onnxrt.ops_cpu._op_onnx_numpy C++ helpers of ONNX operators. source on GitHub |
_op_shape_op |
module mlprodict.onnxrt.ops_shape._op_shape_op Computes shape inference for onnx operators. source on GitHub |
_validate_problems_helper |
module mlprodict.onnxrt.validate._validate_problems_helper Validates runtime for many scikit-learn operators. The submodule relies on onnxconverter_common, sklearn-onnx. source on GitHub
api_extension |
module mlprodict.grammar.grammar_sklearn.grammar.api_extension Implements decorators to extend the API. source on GitHub |
asv2csv |
module mlprodict.cli.asv2csv Command line about exporting asv results into a dataframe. source on GitHub |
asv_bench |
module mlprodict.cli.asv_bench Command line about validation of prediction runtime. source on GitHub |
asv_exports |
module mlprodict.asv_benchmark.asv_exports Functions to help export json results into text. source on GitHub
asv_options_helper |
module mlprodict.tools.asv_options_helper Functions to show shortened options in asv benchmarks. source on GitHub |
backend |
module mlprodict.onnxrt.backend ONNX Backend for OnnxInference. The docstring embeds a usage example built on onnx.backend.test.BackendTest; a cleaned-up version of the example shared by the backend_* modules follows the backend_shape entry below. source on GitHub
backend_micropy |
module mlprodict.onnxrt.backend_micropy ONNX Backend for OnnxInference. The docstring embeds the same BackendTest usage example as the other backend_* modules (shown after the backend_shape entry). source on GitHub
backend_ort |
module mlprodict.onnxrt.backend_ort ONNX Backend for OnnxInference. The docstring embeds the same BackendTest usage example as the other backend_* modules (shown after the backend_shape entry). source on GitHub
backend_py |
module mlprodict.onnxrt.backend_py ONNX Backend for OnnxInference. The docstring embeds the same BackendTest usage example as the other backend_* modules (shown after the backend_shape entry). source on GitHub
backend_pyc |
module mlprodict.onnxrt.backend_pyc ONNX Backend for OnnxInference. The docstring embeds the same BackendTest usage example as the other backend_* modules (shown after the backend_shape entry). source on GitHub
backend_pyeval |
module mlprodict.onnxrt.backend_pyeval ONNX Backend for OnnxInference. The docstring embeds the same BackendTest usage example as the other backend_* modules (shown after the backend_shape entry). source on GitHub
backend_shape |
module mlprodict.onnxrt.backend_shape ONNX Backend for OnnxInference. The docstring embeds the same BackendTest usage example as the other backend_* modules (shown below). source on GitHub
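The backend_* docstrings above share one usage example: it registers the backend with onnx.backend.test.BackendTest, runs the ONNX backend test suite and prints a success ratio. backend_py is used below as an arbitrary choice; any of the backend_* modules can be imported in its place::

    import unittest
    from contextlib import redirect_stdout, redirect_stderr
    from io import StringIO
    from onnx.backend.test import BackendTest
    import mlprodict.onnxrt.backend_py as backend   # or backend_ort, backend_micropy, ...

    back_test = BackendTest(backend, __name__)
    # skip the heaviest pretrained-model tests
    for pattern in ['.*_blvc_.*', '.*_densenet_.*', '.*_densenet121_.*',
                    '.*_inception_.*', '.*_resnet50_.*', '.*_shufflenet_.*',
                    '.*_squeezenet_.*', '.*_vgg19_.*', '.*_zfnet512_.*']:
        back_test.exclude(pattern)
    globals().update(back_test.enable_report().test_cases)

    buffer = StringIO()
    with redirect_stdout(buffer), redirect_stderr(buffer):
        res = unittest.main(verbosity=2, exit=False)

    testsRun = res.result.testsRun
    errors = len(res.result.errors)
    skipped = len(res.result.skipped)
    print("testsRun=%d errors=%d skipped=%d" % (testsRun, errors, skipped))
    ran = testsRun - skipped
    print("ratio=%f" % (1 - errors * 1.0 / ran))
    lines = buffer.getvalue().split('\n')
    print("\n".join(line for line in lines
                    if "skipped 'no matched include pattern'" not in line))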
blas_lapack |
module mlprodict.testing.einsum.blas_lapack Direct calls to libraries BLAS and LAPACK. source on GitHub |
c_compilation |
module mlprodict.grammar.cc.c_compilation Helpers to compile C. source on GitHub |
cdist_score |
module mlprodict.onnx_conv.scorers.cdist_score Implementation of a dummy score using cdist. source on GitHub |
cleaning |
module mlprodict.tools.cleaning Better display. source on GitHub |
code_helper |
module mlprodict.tools.code_helper A couple of tools unrelated to what the package does. source on GitHub |
common_asv_skl |
module mlprodict.asv_benchmark.common_asv_skl Common class for all benchmarks testing converted models from scikit-learn with asv. The benchmark can be run through file run_asv.sh on Linux or run_asv.bat on Windows. .. warning:: On Windows, avoid cloning the repository into a folder with a long full path. Visual Studio tends to enforce the maximum path length limit even when the system is configured to allow longer paths. source on GitHub
conv_lightgbm |
module mlprodict.onnx_conv.operator_converters.conv_lightgbm Modified converter from LightGbm.py. source on GitHub |
conv_transfer_transformer |
module mlprodict.onnx_conv.operator_converters.conv_transfer_transformer Converters for models from mlinsights. source on GitHub |
conv_xgboost |
module mlprodict.onnx_conv.operator_converters.conv_xgboost Modified converter from XGBoost.py. source on GitHub |
convert |
module mlprodict.onnx_conv.convert Overloads a conversion function. source on GitHub |
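A minimal sketch of the overloaded conversion entry point; it assumes to_onnx is exported from mlprodict.onnx_conv and keeps the skl2onnx-style signature (fitted model, sample input)::

    import numpy
    from sklearn.datasets import load_iris
    from sklearn.linear_model import LogisticRegression
    from mlprodict.onnx_conv import to_onnx   # assumed export

    X, y = load_iris(return_X_y=True)
    clr = LogisticRegression(max_iter=500).fit(X, y)
    # the sample input fixes the input type and shape of the ONNX graph
    onx = to_onnx(clr, X[:1].astype(numpy.float32))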
convert_validate |
module mlprodict.cli.convert_validate Command line about validation of prediction runtime. source on GitHub |
create_asv |
module mlprodict.asv_benchmark.create_asv Functions to create a benchmark based on asv for many regressors and classifiers. source on GitHub
direct_blas_lapack.cpython-39-x86_64-linux-gnu |
module mlprodict.testing.einsum.direct_blas_lapack Direct calls to libraries BLAS and LAPACK. The wrapper for GEMM still does not work for matrices which are not square. source on GitHub |
doc_helper |
module mlprodict.onnxrt.doc.doc_helper Documentation helper. source on GitHub |
doc_write_helper |
module mlprodict.onnxrt.doc.doc_write_helper Documentation helper. source on GitHub |
einsum |
module mlprodict.cli.einsum Command line to check einsum scenarios. source on GitHub |
einsum_bench |
module mlprodict.testing.einsum.einsum_bench Function to measure the performance of einsum decomposition. source on GitHub |
einsum_fct |
module mlprodict.testing.einsum.einsum_fct Main functions decomposing einsum computation into simpler functions. source on GitHub
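A hedged sketch of the cached einsum entry point, assuming the submodule exports an einsum function with a numpy.einsum-like signature::

    import numpy
    # assumption: einsum(equation, *operands) decomposes the equation into
    # simpler ONNX operators and returns the same result as numpy.einsum
    from mlprodict.testing.einsum import einsum

    x = numpy.random.rand(2, 3, 4).astype(numpy.float32)
    y = numpy.random.rand(2, 4, 5).astype(numpy.float32)
    expected = numpy.einsum("bij,bjk->bik", x, y)
    got = einsum("bij,bjk->bik", x, y)
    assert numpy.allclose(expected, got, atol=1e-5)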
einsum_impl |
module mlprodict.testing.einsum.einsum_impl Main functions decomposing einsum computation into simpler functions. source on GitHub
einsum_impl_classes |
module mlprodict.testing.einsum.einsum_impl_classes Classes representing the sequence of matrix operations to implement einsum computation. source on GitHub |
einsum_impl_ext |
module mlprodict.testing.einsum.einsum_impl_ext Functions implementing einsum computation for two matrices having the same dimensions. source on GitHub
einsum_ml |
module mlprodict.testing.einsum.einsum_ml Functions used to predict the cost of a transposition. source on GitHub |
exc |
module mlprodict.grammar.grammar_sklearn.grammar.exc Exception definition. source on GitHub |
excs |
module mlprodict.onnxrt.excs Exceptions. source on GitHub |
experimental |
module mlprodict.testing.experimental Experimental implementation. source on GitHub |
experimental_c.cpython-39-x86_64-linux-gnu |
module mlprodict.testing.experimental_c_impl.experimental_c C++ experimental implementations. source on GitHub |
filename_helper |
module mlprodict.tools.filename_helper A couple of tools related to filenames. source on GitHub |
function_transformer_converters |
module mlprodict.onnx_conv.sklconv.function_transformer_converters Rewrites some of the converters implemented in sklearn-onnx. source on GitHub |
g_sklearn_identify |
module mlprodict.grammar.grammar_sklearn.g_sklearn_identify Helpers to identify an interpreter. source on GitHub |
g_sklearn_linear_model |
module mlprodict.grammar.grammar_sklearn.g_sklearn_linear_model List of interpreters for scikit-learn models. source on GitHub
g_sklearn_main |
module mlprodict.grammar.grammar_sklearn.g_sklearn_main Main functions to convert a machine learned model from scikit-learn. source on GitHub
g_sklearn_preprocessing |
module mlprodict.grammar.grammar_sklearn.g_sklearn_preprocessing Converters for scikit-learn models. source on GitHub
g_sklearn_tree |
module mlprodict.grammar.grammar_sklearn.g_sklearn_tree List of converters for scikit-learn models. source on GitHub
g_sklearn_type_helpers |
module mlprodict.grammar.grammar_sklearn.g_sklearn_type_helpers Tiny helpers for scikit-learn exporters. source on GitHub |
gactions |
module mlprodict.grammar.grammar_sklearn.grammar.gactions Action definition. source on GitHub |
gactions_num |
module mlprodict.grammar.grammar_sklearn.grammar.gactions_num Action definition. source on GitHub |
gactions_tensor |
module mlprodict.grammar.grammar_sklearn.grammar.gactions_tensor Action definition. source on GitHub |
gmlactions |
module mlprodict.grammar.grammar_sklearn.grammar.gmlactions Actions definition. source on GitHub |
graph_schema_helper |
module mlprodict.onnx_tools.optim.graph_schema_helper Functions to help guessing the final graph structure. source on GitHub |
graphs |
module mlprodict.tools.graphs Alternative to dot to display a graph. .. versionadded:: 0.7 source on GitHub |
gtypes |
module mlprodict.grammar.grammar_sklearn.grammar.gtypes Types definition. source on GitHub |
lgbm_helper |
module mlprodict.onnx_conv.helpers.lgbm_helper Helpers to speed up the conversion of LightGBM models or to transform them. source on GitHub
model_checker |
module mlprodict.onnx_tools.model_checker Investigate issues happening with float32. source on GitHub |
model_info |
module mlprodict.tools.model_info Functions to help get more information about the models. source on GitHub |
model_verification |
module mlprodict.testing.model_verification Complex but recurring testing functions. source on GitHub |
nb_helper |
module mlprodict.nb_helper Helpers for notebooks. source on GitHub |
node_visitor_translator |
module mlprodict.onnx_tools.onnx_grammar.node_visitor_translator One class which visits a syntax tree. source on GitHub |
numpy_helper |
module mlprodict.onnx_tools.exports.numpy_helper Numpy helpers for the conversion from onnx to numpy. source on GitHub |
numpy_onnx_impl |
module mlprodict.npy.numpy_onnx_impl numpy functions implemented with onnx. .. versionadded:: 0.6 .. versionchanged:: 0.7 source on GitHub |
numpy_onnx_impl_body |
module mlprodict.npy.numpy_onnx_impl_body Design to implement graph as parameter. .. versionadded:: 0.8 source on GitHub |
numpy_onnx_impl_skl |
module mlprodict.npy.numpy_onnx_impl_skl numpy functions implemented with onnx. .. versionadded:: 0.6 source on GitHub |
numpy_onnx_pyrt |
module mlprodict.npy.numpy_onnx_pyrt numpy functions implemented with onnx and compiled with this python runtime. .. versionadded:: 0.6 source on GitHub |
numpy_onnx_pyrt_skl |
module mlprodict.npy.numpy_onnx_pyrt_skl numpy functions implemented with onnx and compiled with this python runtime. .. versionadded:: 0.6 source on GitHub |
onnx2py_helper |
module mlprodict.onnx_tools.onnx2py_helper Functions which convert ONNX objects into readable Python objects. source on GitHub
onnx_backend |
module mlprodict.testing.onnx_backend Tests with onnx backend. source on GitHub |
onnx_code |
module mlprodict.cli.onnx_code Command line to check einsum scenarios. source on GitHub |
onnx_complex |
module mlprodict.onnx_conv.onnx_ops.onnx_complex Custom operators for complex numbers. source on GitHub |
onnx_export |
module mlprodict.onnx_tools.onnx_export Exports an ONNX graph in a way it can be created again with a Python script. It relies on jinja2 and autopep8. .. versionadded:: 0.7 source on GitHub
onnx_export_templates |
module mlprodict.onnx_tools.onnx_export_templates Templates to export an ONNX graph in a way it can be created again with a Python script. .. versionadded:: 0.7 source on GitHub
onnx_fft |
module mlprodict.onnx_conv.onnx_ops.onnx_fft Custom operators for FFT. source on GitHub |
onnx_gradient_op |
module mlprodict.onnx_conv.onnx_ops.onnx_gradient_op Custom operators for gradient computation. source on GitHub
onnx_helper |
module mlprodict.onnx_tools.optim.onnx_helper Statistics on ONNX models. source on GitHub |
onnx_inference |
module mlprodict.onnxrt.onnx_inference Implements a class able to compute the predictions from an ONNX model. source on GitHub
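A minimal sketch of the class implemented here, assuming OnnxInference accepts a model path or ModelProto and that run takes a dictionary mapping input names to numpy arrays (the file name and the input name 'X' are placeholders)::

    import numpy
    from mlprodict.onnxrt import OnnxInference

    oinf = OnnxInference("model.onnx")        # placeholder model file
    feeds = {"X": numpy.random.rand(3, 4).astype(numpy.float32)}
    outputs = oinf.run(feeds)                 # dict: output name -> array
    print(outputs)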
onnx_inference_exports |
module mlprodict.onnxrt.onnx_inference_exports Extensions to class OnnxInference. source on GitHub
onnx_inference_node |
module mlprodict.onnxrt.onnx_inference_node OnnxInferenceNode definition. source on GitHub |
onnx_inference_ort_helper |
module mlprodict.tools.onnx_inference_ort_helper Helpers for onnxruntime. source on GitHub |
onnx_manipulations |
module mlprodict.onnx_tools.onnx_manipulations Implements a class able to compute the predictions from an ONNX model. source on GitHub
onnx_micro_runtime |
module mlprodict.onnxrt.onnx_micro_runtime Micro runtime for ONNX. .. versionadded:: 0.6 source on GitHub |
onnx_numpy_annotation |
module mlprodict.npy.onnx_numpy_annotation numpy annotations. .. versionadded:: 0.6 source on GitHub |
onnx_numpy_compiler |
module mlprodict.npy.onnx_numpy_compiler Implements numpy functions with onnx and a runtime. .. versionadded:: 0.6 source on GitHub |
onnx_numpy_wrapper |
module mlprodict.npy.onnx_numpy_wrapper Wraps numpy functions into onnx. .. versionadded:: 0.6 source on GitHub |
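A hedged sketch of the wrapper this module provides, assuming onnxnumpy_default and NDArray are re-exported from mlprodict.npy and that numpy_onnx_impl mirrors numpy function names::

    from typing import Any
    import numpy
    # assumed names: onnxnumpy_default, NDArray, nxnp.abs, nxnp.log
    from mlprodict.npy import onnxnumpy_default, NDArray
    import mlprodict.npy.numpy_onnx_impl as nxnp

    @onnxnumpy_default
    def custom_log1p(x: NDArray[Any, numpy.float32]) -> NDArray[Any, numpy.float32]:
        "log(1 + |x|), compiled into an ONNX graph and executed by the runtime"
        return nxnp.log(nxnp.abs(x) + numpy.float32(1))

    print(custom_log1p(numpy.array([-2.5, 3.0], dtype=numpy.float32)))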
onnx_optimisation |
module mlprodict.onnx_tools.optim.onnx_optimisation Optimisations of ONNX graphs. source on GitHub |
onnx_optimisation_identity |
module mlprodict.onnx_tools.optim.onnx_optimisation_identity Optimisation of ONNX graphs. source on GitHub |
onnx_optimisation_redundant |
module mlprodict.onnx_tools.optim.onnx_optimisation_redundant Optimisation of ONNX graphs. source on GitHub |
onnx_optimisation_unused |
module mlprodict.onnx_tools.optim.onnx_optimisation_unused Optimisation of ONNX graphs. source on GitHub |
onnx_pipeline |
module mlprodict.sklapi.onnx_pipeline A pipeline which serializes into ONNX step by step. source on GitHub
onnx_shape_inference |
module mlprodict.onnxrt.onnx_shape_inference Runtime to infer shapes. .. versionadded:: 0.9 source on GitHub |
onnx_sklearn_wrapper |
module mlprodict.npy.onnx_sklearn_wrapper Helpers to use numpy API to easily write converters for scikit-learn classes for onnx. .. versionadded:: 0.6 source on GitHub |
onnx_speed_up |
module mlprodict.sklapi.onnx_speed_up Speeding up scikit-learn with onnx. .. versionadded:: 0.7 source on GitHub |
onnx_tokenizer |
module mlprodict.onnx_conv.onnx_ops.onnx_tokenizer Custom operator Tokenizer. source on GitHub |
onnx_tokenizer |
module mlprodict.sklapi.onnx_tokenizer Wrappers for tokenizers implemented in onnxruntime-extensions. source on GitHub
onnx_tools |
module mlprodict.onnx_tools.onnx_tools Functions to manipulate ONNX files. source on GitHub
onnx_transformer |
module mlprodict.sklapi.onnx_transformer Wraps runtime into a scikit-learn transformer. source on GitHub |
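A hedged sketch of the transformer wrapper, assuming OnnxTransformer takes serialized ONNX bytes and behaves like any scikit-learn transformer inside a pipeline::

    from sklearn.linear_model import LogisticRegression
    from sklearn.pipeline import make_pipeline
    from mlprodict.sklapi import OnnxTransformer

    # 'preprocessing.onnx' is a placeholder for an existing ONNX file
    with open("preprocessing.onnx", "rb") as f:
        onx_bytes = f.read()
    pipe = make_pipeline(OnnxTransformer(onx_bytes), LogisticRegression())
    # pipe.fit(X, y) / pipe.predict(X) then work as with any pipeline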
onnx_translation |
module mlprodict.onnx_tools.onnx_grammar.onnx_translation One class which visits a syntax tree. source on GitHub |
onnx_translator |
module mlprodict.onnx_tools.onnx_grammar.onnx_translator One class which visits a syntax tree. source on GitHub |
onnx_variable |
module mlprodict.npy.onnx_variable Intermediate class between numpy and onnx. .. versionadded:: 0.6 source on GitHub |
onnx_version |
module mlprodict.npy.onnx_version Identifies a version of a function. .. versionadded:: 0.6 source on GitHub |
op_abs |
module mlprodict.onnxrt.ops_cpu.op_abs Runtime operator. source on GitHub |
op_acos |
module mlprodict.onnxrt.ops_cpu.op_acos Runtime operator. source on GitHub |
op_acosh |
module mlprodict.onnxrt.ops_cpu.op_acosh Runtime operator. source on GitHub |
op_add |
module mlprodict.onnxrt.ops_cpu.op_add Runtime operator. source on GitHub |
op_and |
module mlprodict.onnxrt.ops_cpu.op_and Runtime operator. source on GitHub |
op_argmax |
module mlprodict.onnxrt.ops_cpu.op_argmax Runtime operator. source on GitHub |
op_argmin |
module mlprodict.onnxrt.ops_cpu.op_argmin Runtime operator. source on GitHub |
op_array_feature_extractor |
module mlprodict.onnxrt.ops_cpu.op_array_feature_extractor Runtime operator. source on GitHub |
op_asin |
module mlprodict.onnxrt.ops_cpu.op_asin Runtime operator. source on GitHub |
op_asinh |
module mlprodict.onnxrt.ops_cpu.op_asinh Runtime operator. source on GitHub |
op_atan |
module mlprodict.onnxrt.ops_cpu.op_atan Runtime operator. source on GitHub |
op_atanh |
module mlprodict.onnxrt.ops_cpu.op_atanh Runtime operator. source on GitHub |
op_average_pool |
module mlprodict.onnxrt.ops_cpu.op_average_pool Runtime operator. source on GitHub |
op_batch_normalization |
module mlprodict.onnxrt.ops_cpu.op_batch_normalization Runtime operator. source on GitHub |
op_binarizer |
module mlprodict.onnxrt.ops_cpu.op_binarizer Runtime operator. source on GitHub |
op_bitshift |
module mlprodict.onnxrt.ops_cpu.op_bitshift Runtime operator. source on GitHub |
op_broadcast_gradient_args |
module mlprodict.onnxrt.ops_cpu.op_broadcast_gradient_args Runtime operator. source on GitHub |
op_cast |
module mlprodict.onnxrt.ops_cpu.op_cast Runtime operator. source on GitHub |
op_category_mapper |
module mlprodict.onnxrt.ops_cpu.op_category_mapper Runtime operator. source on GitHub |
op_cdist |
module mlprodict.onnxrt.ops_cpu.op_cdist Runtime operator. source on GitHub |
op_ceil |
module mlprodict.onnxrt.ops_cpu.op_ceil Runtime operator. source on GitHub |
op_celu |
module mlprodict.onnxrt.ops_cpu.op_celu Runtime operator. source on GitHub |
op_clip |
module mlprodict.onnxrt.ops_cpu.op_clip Runtime operator. source on GitHub |
op_complex_abs |
module mlprodict.onnxrt.ops_cpu.op_complex_abs Runtime operator. source on GitHub |
op_compress |
module mlprodict.onnxrt.ops_cpu.op_compress Runtime operator. source on GitHub |
op_concat |
module mlprodict.onnxrt.ops_cpu.op_concat Runtime operator. source on GitHub |
op_concat_from_sequence |
module mlprodict.onnxrt.ops_cpu.op_concat_from_sequence Runtime operator. source on GitHub |
op_constant |
module mlprodict.onnxrt.ops_cpu.op_constant Runtime operator. source on GitHub |
op_constant_of_shape |
module mlprodict.onnxrt.ops_cpu.op_constant_of_shape Runtime operator. source on GitHub |
op_conv |
module mlprodict.onnxrt.ops_cpu.op_conv Runtime operator. source on GitHub |
op_conv_.cpython-39-x86_64-linux-gnu |
module mlprodict.onnxrt.ops_cpu.op_conv_ Implements runtime for operator Conv. The code is inspired from conv.cc in onnxruntime. source on GitHub |
op_conv_transpose |
module mlprodict.onnxrt.ops_cpu.op_conv_transpose Runtime operator. source on GitHub |
op_conv_transpose_.cpython-39-x86_64-linux-gnu |
module mlprodict.onnxrt.ops_cpu.op_conv_transpose_ Implements runtime for operator ConvTranspose. The code is inspired from conv_transpose.cc in onnxruntime. source on GitHub
op_cos |
module mlprodict.onnxrt.ops_cpu.op_cos Runtime operator. source on GitHub |
op_cosh |
module mlprodict.onnxrt.ops_cpu.op_cosh Runtime operator. source on GitHub |
op_cum_sum |
module mlprodict.onnxrt.ops_cpu.op_cum_sum Runtime operator. source on GitHub |
op_debug |
module mlprodict.onnxrt.ops_cpu.op_debug Runtime operator. source on GitHub |
op_dequantize_linear |
module mlprodict.onnxrt.ops_cpu.op_dequantize_linear Runtime operator. source on GitHub |
op_det |
module mlprodict.onnxrt.ops_cpu.op_det Runtime operator. source on GitHub |
op_dict_vectorizer |
module mlprodict.onnxrt.ops_cpu.op_dict_vectorizer Runtime operator. source on GitHub |
op_div |
module mlprodict.onnxrt.ops_cpu.op_div Runtime operator. source on GitHub |
op_dropout |
module mlprodict.onnxrt.ops_cpu.op_dropout Runtime operator. source on GitHub |
op_einsum |
module mlprodict.onnxrt.ops_cpu.op_einsum Runtime operator. source on GitHub |
op_elu |
module mlprodict.onnxrt.ops_cpu.op_elu Runtime operator. source on GitHub |
op_equal |
module mlprodict.onnxrt.ops_cpu.op_equal Runtime operator. source on GitHub |
op_erf |
module mlprodict.onnxrt.ops_cpu.op_erf Runtime operator. source on GitHub |
op_exp |
module mlprodict.onnxrt.ops_cpu.op_exp Runtime operator. source on GitHub |
op_expand |
module mlprodict.onnxrt.ops_cpu.op_expand Runtime operator. source on GitHub |
op_eyelike |
module mlprodict.onnxrt.ops_cpu.op_eyelike Runtime operator. source on GitHub |
op_feature_vectorizer |
module mlprodict.onnxrt.ops_cpu.op_feature_vectorizer Runtime operator. source on GitHub |
op_fft |
module mlprodict.onnxrt.ops_cpu.op_fft Runtime operator. source on GitHub |
op_fft2d |
module mlprodict.onnxrt.ops_cpu.op_fft2d Runtime operator. source on GitHub |
op_flatten |
module mlprodict.onnxrt.ops_cpu.op_flatten Runtime operator. source on GitHub |
op_floor |
module mlprodict.onnxrt.ops_cpu.op_floor Runtime operator. source on GitHub |
op_fused_matmul |
module mlprodict.onnxrt.ops_cpu.op_fused_matmul Runtime operator. source on GitHub |
op_gather |
module mlprodict.onnxrt.ops_cpu.op_gather Runtime operator. source on GitHub |
op_gather_.cpython-39-x86_64-linux-gnu |
module mlprodict.onnxrt.ops_cpu.op_gather_ Implements runtime for operator Gather. The code is inspired from tfidfvectorizer.cc in onnxruntime. source on GitHub |
op_gather_elements |
module mlprodict.onnxrt.ops_cpu.op_gather_elements Runtime operator. source on GitHub |
op_gemm |
module mlprodict.onnxrt.ops_cpu.op_gemm Runtime operator. source on GitHub |
op_global_average_pool |
module mlprodict.onnxrt.ops_cpu.op_global_average_pool Runtime operator. source on GitHub |
op_greater |
module mlprodict.onnxrt.ops_cpu.op_greater Runtime operator. source on GitHub |
op_hard_sigmoid |
module mlprodict.onnxrt.ops_cpu.op_hard_sigmoid Runtime operator. source on GitHub |
op_hardmax |
module mlprodict.onnxrt.ops_cpu.op_hardmax Runtime operator. source on GitHub |
op_identity |
module mlprodict.onnxrt.ops_cpu.op_identity Runtime operator. source on GitHub |
op_if |
module mlprodict.onnxrt.ops_cpu.op_if Runtime operator. source on GitHub |
op_imputer |
module mlprodict.onnxrt.ops_cpu.op_imputer Runtime operator. source on GitHub |
op_isinf |
module mlprodict.onnxrt.ops_cpu.op_isinf Runtime operator. source on GitHub |
op_isnan |
module mlprodict.onnxrt.ops_cpu.op_isnan Runtime operator. source on GitHub |
op_label_encoder |
module mlprodict.onnxrt.ops_cpu.op_label_encoder Runtime operator. source on GitHub |
op_leaky_relu |
module mlprodict.onnxrt.ops_cpu.op_leaky_relu Runtime operator. source on GitHub |
op_less |
module mlprodict.onnxrt.ops_cpu.op_less Runtime operator. source on GitHub |
op_linear_classifier |
module mlprodict.onnxrt.ops_cpu.op_linear_classifier Runtime operator. source on GitHub |
op_linear_regressor |
module mlprodict.onnxrt.ops_cpu.op_linear_regressor Runtime operator. source on GitHub |
op_log |
module mlprodict.onnxrt.ops_cpu.op_log Runtime operator. source on GitHub |
op_log_softmax |
module mlprodict.onnxrt.ops_cpu.op_log_softmax Runtime operator. source on GitHub |
op_loop |
module mlprodict.onnxrt.ops_cpu.op_loop Runtime operator. .. versionadded:: 0.7 source on GitHub |
op_lp_normalization |
module mlprodict.onnxrt.ops_cpu.op_lp_normalization Runtime operator. source on GitHub |
op_matmul |
module mlprodict.onnxrt.ops_cpu.op_matmul Runtime operator. source on GitHub |
op_max |
module mlprodict.onnxrt.ops_cpu.op_max Runtime operator. source on GitHub |
op_max_pool |
module mlprodict.onnxrt.ops_cpu.op_max_pool Runtime operator. source on GitHub |
op_max_pool_.cpython-39-x86_64-linux-gnu |
module mlprodict.onnxrt.ops_cpu.op_max_pool_ Implements runtime for operator MaxPool. The code is inspired from pool.cc in onnxruntime. source on GitHub |
op_mean |
module mlprodict.onnxrt.ops_cpu.op_mean Runtime operator. source on GitHub |
op_min |
module mlprodict.onnxrt.ops_cpu.op_min Runtime operator. source on GitHub |
op_mod |
module mlprodict.onnxrt.ops_cpu.op_mod Runtime operator. source on GitHub |
op_mul |
module mlprodict.onnxrt.ops_cpu.op_mul Runtime operator. source on GitHub |
op_neg |
module mlprodict.onnxrt.ops_cpu.op_neg Runtime operator. source on GitHub |
op_negative_log_likelihood_loss |
module mlprodict.onnxrt.ops_cpu.op_negative_log_likelihood_loss Runtime operator. source on GitHub |
op_normalizer |
module mlprodict.onnxrt.ops_cpu.op_normalizer Runtime operator. source on GitHub |
op_not |
module mlprodict.onnxrt.ops_cpu.op_not Runtime operator. source on GitHub |
op_one_hot_encoder |
module mlprodict.onnxrt.ops_cpu.op_one_hot_encoder Runtime operator. source on GitHub |
op_or |
module mlprodict.onnxrt.ops_cpu.op_or Runtime operator. source on GitHub |
op_pad |
module mlprodict.onnxrt.ops_cpu.op_pad Runtime operator. source on GitHub |
op_pow |
module mlprodict.onnxrt.ops_cpu.op_pow Runtime operator. source on GitHub |
op_prelu |
module mlprodict.onnxrt.ops_cpu.op_prelu Runtime operator. source on GitHub |
op_qlinear_conv |
module mlprodict.onnxrt.ops_cpu.op_qlinear_conv Runtime operator. source on GitHub |
op_qlinear_conv_.cpython-39-x86_64-linux-gnu |
module mlprodict.onnxrt.ops_cpu.op_qlinear_conv_ Implements runtime for operator QLinearConv. The code is inspired from conv.cc in onnxruntime. source on GitHub |
op_quantize_linear |
module mlprodict.onnxrt.ops_cpu.op_quantize_linear Runtime operator. source on GitHub |
op_random |
module mlprodict.onnxrt.ops_cpu.op_random Runtime operator. source on GitHub |
op_range |
module mlprodict.onnxrt.ops_cpu.op_range Runtime operator. source on GitHub |
op_reciprocal |
module mlprodict.onnxrt.ops_cpu.op_reciprocal Runtime operator. source on GitHub |
op_reduce_l1 |
module mlprodict.onnxrt.ops_cpu.op_reduce_l1 Runtime operator. source on GitHub |
op_reduce_l2 |
module mlprodict.onnxrt.ops_cpu.op_reduce_l2 Runtime operator. source on GitHub |
op_reduce_log_sum |
module mlprodict.onnxrt.ops_cpu.op_reduce_log_sum Runtime operator. source on GitHub |
op_reduce_log_sum_exp |
module mlprodict.onnxrt.ops_cpu.op_reduce_log_sum_exp Runtime operator. source on GitHub |
op_reduce_max |
module mlprodict.onnxrt.ops_cpu.op_reduce_max Runtime operator. source on GitHub |
op_reduce_mean |
module mlprodict.onnxrt.ops_cpu.op_reduce_mean Runtime operator. source on GitHub |
op_reduce_min |
module mlprodict.onnxrt.ops_cpu.op_reduce_min Runtime operator. source on GitHub |
op_reduce_prod |
module mlprodict.onnxrt.ops_cpu.op_reduce_prod Runtime operator. source on GitHub |
op_reduce_sum |
module mlprodict.onnxrt.ops_cpu.op_reduce_sum Runtime operator. source on GitHub |
op_reduce_sum_square |
module mlprodict.onnxrt.ops_cpu.op_reduce_sum_square Runtime operator. source on GitHub |
op_relu |
module mlprodict.onnxrt.ops_cpu.op_relu Runtime operator. source on GitHub |
op_reshape |
module mlprodict.onnxrt.ops_cpu.op_reshape Runtime operator. source on GitHub |
op_rfft |
module mlprodict.onnxrt.ops_cpu.op_rfft Runtime operator. source on GitHub |
op_rnn |
module mlprodict.onnxrt.ops_cpu.op_rnn Runtime operator. source on GitHub |
op_round |
module mlprodict.onnxrt.ops_cpu.op_round Runtime operator. source on GitHub |
op_scaler |
module mlprodict.onnxrt.ops_cpu.op_scaler Runtime operator. source on GitHub |
op_scan |
module mlprodict.onnxrt.ops_cpu.op_scan Runtime operator. source on GitHub |
op_scatter_elements |
module mlprodict.onnxrt.ops_cpu.op_scatter_elements Runtime operator. source on GitHub |
op_selu |
module mlprodict.onnxrt.ops_cpu.op_selu Runtime operator. source on GitHub |
op_sequence_at |
module mlprodict.onnxrt.ops_cpu.op_sequence_at Runtime operator. .. versionadded:: 0.8 source on GitHub |
op_sequence_construct |
module mlprodict.onnxrt.ops_cpu.op_sequence_construct Runtime operator. .. versionadded:: 0.7 source on GitHub |
op_sequence_insert |
module mlprodict.onnxrt.ops_cpu.op_sequence_insert Runtime operator. .. versionadded:: 0.7 source on GitHub |
op_shape |
module mlprodict.onnxrt.ops_cpu.op_shape Runtime operator. source on GitHub |
op_sigmoid |
module mlprodict.onnxrt.ops_cpu.op_sigmoid Runtime operator. source on GitHub |
op_sign |
module mlprodict.onnxrt.ops_cpu.op_sign Runtime operator. source on GitHub |
op_sin |
module mlprodict.onnxrt.ops_cpu.op_sin Runtime operator. source on GitHub |
op_sinh |
module mlprodict.onnxrt.ops_cpu.op_sinh Runtime operator. source on GitHub |
op_size |
module mlprodict.onnxrt.ops_cpu.op_size Runtime operator. source on GitHub |
op_slice |
module mlprodict.onnxrt.ops_cpu.op_slice Runtime operator. source on GitHub |
op_softmax |
module mlprodict.onnxrt.ops_cpu.op_softmax Runtime operator. source on GitHub |
op_softmax_cross_entropy_loss |
module mlprodict.onnxrt.ops_cpu.op_softmax_cross_entropy_loss Runtime operator. source on GitHub |
op_solve |
module mlprodict.onnxrt.ops_cpu.op_solve Runtime operator. source on GitHub |
op_split |
module mlprodict.onnxrt.ops_cpu.op_split Runtime operator. source on GitHub |
op_sqrt |
module mlprodict.onnxrt.ops_cpu.op_sqrt Runtime operator. source on GitHub |
op_squeeze |
module mlprodict.onnxrt.ops_cpu.op_squeeze Runtime operator. source on GitHub |
op_string_normalizer |
module mlprodict.onnxrt.ops_cpu.op_string_normalizer Runtime operator. source on GitHub |
op_sub |
module mlprodict.onnxrt.ops_cpu.op_sub Runtime operator. source on GitHub |
op_sum |
module mlprodict.onnxrt.ops_cpu.op_sum Runtime operator. source on GitHub |
op_svm_classifier |
module mlprodict.onnxrt.ops_cpu.op_svm_classifier Runtime operator. source on GitHub |
op_svm_classifier_.cpython-39-x86_64-linux-gnu |
module mlprodict.onnxrt.ops_cpu.op_svm_classifier_ Implements runtime for operator SVMClassifier. The code is inspired from svm_classifier.cc in onnxruntime. source on GitHub |
op_svm_regressor |
module mlprodict.onnxrt.ops_cpu.op_svm_regressor Runtime operator. source on GitHub |
op_svm_regressor_.cpython-39-x86_64-linux-gnu |
module mlprodict.onnxrt.ops_cpu.op_svm_regressor_ Implements runtime for operator SVMRegressor. The code is inspired from svm_regressor.cc in onnxruntime. source on GitHub |
op_tan |
module mlprodict.onnxrt.ops_cpu.op_tan Runtime operator. source on GitHub |
op_tanh |
module mlprodict.onnxrt.ops_cpu.op_tanh Runtime operator. source on GitHub |
op_tfidfvectorizer |
module mlprodict.onnxrt.ops_cpu.op_tfidfvectorizer Runtime operator. source on GitHub |
op_tfidfvectorizer_.cpython-39-x86_64-linux-gnu |
module mlprodict.onnxrt.ops_cpu.op_tfidfvectorizer_ Implements runtime for operator TfIdfVectorizer. The code is inspired from tfidfvectorizer.cc in onnxruntime. source on GitHub |
op_tokenizer |
module mlprodict.onnxrt.ops_cpu.op_tokenizer Runtime operator. source on GitHub |
op_topk |
module mlprodict.onnxrt.ops_cpu.op_topk Runtime operator. source on GitHub |
op_transpose |
module mlprodict.onnxrt.ops_cpu.op_transpose Runtime operator. source on GitHub |
op_tree_ensemble_classifier |
module mlprodict.onnxrt.ops_cpu.op_tree_ensemble_classifier Runtime operator. source on GitHub |
op_tree_ensemble_classifier_.cpython-39-x86_64-linux-gnu |
module mlprodict.onnxrt.ops_cpu.op_tree_ensemble_classifier_ Implements runtime for operator TreeEnsembleClassifier. The code is inspired from tree_ensemble_classifier.cc in onnxruntime. source on GitHub |
op_tree_ensemble_classifier_p_.cpython-39-x86_64-linux-gnu |
module mlprodict.onnxrt.ops_cpu.op_tree_ensemble_classifier_p_ Implements runtime for operator TreeEnsembleClassifier. The code is inspired from tree_ensemble_classifier.cc in onnxruntime. source on GitHub
op_tree_ensemble_regressor |
module mlprodict.onnxrt.ops_cpu.op_tree_ensemble_regressor Runtime operator. source on GitHub |
op_tree_ensemble_regressor_.cpython-39-x86_64-linux-gnu |
module mlprodict.onnxrt.ops_cpu.op_tree_ensemble_regressor_ Implements runtime for operator TreeEnsembleRegressor. The code is inspired from tree_ensemble_regressor.cc in onnxruntime. source on GitHub |
op_tree_ensemble_regressor_p_.cpython-39-x86_64-linux-gnu |
module mlprodict.onnxrt.ops_cpu.op_tree_ensemble_regressor_p_ Implements runtime for operator TreeEnsembleRegressor. The code is inspired from tree_ensemble_regressor.cc in onnxruntime. source on GitHub |
op_trilu |
module mlprodict.onnxrt.ops_cpu.op_trilu Runtime operator. source on GitHub |
op_unsqueeze |
module mlprodict.onnxrt.ops_cpu.op_unsqueeze Runtime operator. source on GitHub |
op_where |
module mlprodict.onnxrt.ops_cpu.op_where Runtime operator. source on GitHub |
op_xor |
module mlprodict.onnxrt.ops_cpu.op_xor Runtime operator. source on GitHub |
op_yield_op |
module mlprodict.onnxrt.ops_cpu.op_yield_op Runtime operator. source on GitHub |
op_zipmap |
module mlprodict.onnxrt.ops_cpu.op_zipmap Runtime operator. source on GitHub |
ops |
module mlprodict.onnxrt.ops Loads runtime operator. source on GitHub |
optimize |
module mlprodict.cli.optimize Command line about model optimisation. source on GitHub |
ort_wrapper |
module mlprodict.tools.ort_wrapper Wrapper around onnxruntime. .. versionadded:: 0.6 source on GitHub |
parse_lightgbm |
module mlprodict.onnx_conv.operator_converters.parse_lightgbm Parsers for LightGBM booster. source on GitHub |
plotting |
module mlprodict.plotting.plotting Shortcuts to plotting functions. source on GitHub
plotting_benchmark |
module mlprodict.plotting.plotting_benchmark Useful plots. source on GitHub |
plotting_onnx |
module mlprodict.plotting.plotting_onnx Useful plots. source on GitHub |
plotting_validate_graph |
module mlprodict.plotting.plotting_validate_graph Functions to help visualizing performances. source on GitHub |
quantized_tensor |
module mlprodict.testing.test_utils.quantized_tensor Initializes a quantized tensor from float values. source on GitHub |
register |
module mlprodict.onnx_conv.register Shortcut to onnx_conv. source on GitHub |
register |
module mlprodict.onnx_conv.scorers.register Registers new converters. source on GitHub |
register_rewritten_converters |
module mlprodict.onnx_conv.register_rewritten_converters Rewrites some of the converters implemented in sklearn-onnx. source on GitHub |
replay |
module mlprodict.cli.replay Command line about validation of prediction runtime. source on GitHub |
script_testing |
module mlprodict.testing.script_testing Utilities to test scripts from the scikit-learn documentation. source on GitHub
session |
module mlprodict.onnxrt.ops_whole.session Shortcut to ops_whole. source on GitHub |
shape_container |
module mlprodict.onnxrt.ops_shape.shape_container Class ShapeContainer. source on GitHub
shape_excs |
module mlprodict.onnxrt.ops_shape.shape_excs Errors and exceptions. source on GitHub |
shape_object |
module mlprodict.onnxrt.shape_object Shape object. source on GitHub |
shape_result |
module mlprodict.onnxrt.ops_shape.shape_result Class ShapeResult. source on GitHub
side_by_side |
module mlprodict.onnxrt.validate.side_by_side Helpers to compare executions. source on GitHub |
skl2onnx_helper |
module mlprodict.onnx_tools.exports.skl2onnx_helper Helpers to run examples created with sklearn-onnx. source on GitHub |
skl_model_classifier |
module mlprodict.asv_benchmark.template.skl_model_classifier A template to benchmark a model with asv. The benchmark can be run through file run_asv.sh on Linux or run_asv.bat on Windows. .. warning:: On Windows, avoid cloning the repository into a folder with a long full path. Visual Studio tends to enforce the maximum path length limit even when the system is configured to allow longer paths. source on GitHub
skl_model_classifier_raw_scores |
module mlprodict.asv_benchmark.template.skl_model_classifier_raw_scores A template to benchmark a model with asv. The benchmark can be run through file run_asv.sh on Linux or run_asv.bat on Windows. .. warning:: On Windows, avoid cloning the repository into a folder with a long full path. Visual Studio tends to enforce the maximum path length limit even when the system is configured to allow longer paths. source on GitHub
skl_model_clustering |
module mlprodict.asv_benchmark.template.skl_model_clustering A template to benchmark a model with asv. The benchmark can be run through file run_asv.sh on Linux or run_asv.bat on Windows. .. warning:: On Windows, avoid cloning the repository into a folder with a long full path. Visual Studio tends to enforce the maximum path length limit even when the system is configured to allow longer paths. source on GitHub
skl_model_multi_classifier |
module mlprodict.asv_benchmark.template.skl_model_multi_classifier A template to benchmark a model with asv. The benchmark can be run through file run_asv.sh on Linux or run_asv.bat on Windows. .. warning:: On Windows, avoid cloning the repository into a folder with a long full path. Visual Studio tends to enforce the maximum path length limit even when the system is configured to allow longer paths. source on GitHub
skl_model_outlier |
module mlprodict.asv_benchmark.template.skl_model_outlier A template to benchmark a model with asv. The benchmark can be run through file run_asv.sh on Linux or run_asv.bat on Windows. .. warning:: On Windows, avoid cloning the repository into a folder with a long full path. Visual Studio tends to enforce the maximum path length limit even when the system is configured to allow longer paths. source on GitHub
skl_model_regressor |
module mlprodict.asv_benchmark.template.skl_model_regressor A template to benchmark a model with asv. The benchmark can be run through file run_asv.sh on Linux or run_asv.bat on Windows. .. warning:: On Windows, avoid cloning the repository into a folder with a long full path. Visual Studio tends to enforce the maximum path length limit even when the system is configured to allow longer paths. source on GitHub
skl_model_trainable_transform |
module mlprodict.asv_benchmark.template.skl_model_trainable_transform A template to benchmark a model with asv. The benchmark can be run through file run_asv.sh on Linux or run_asv.bat on Windows. .. warning:: On Windows, avoid cloning the repository into a folder with a long full path. Visual Studio tends to enforce the maximum path length limit even when the system is configured to allow longer paths. source on GitHub
skl_model_transform |
module mlprodict.asv_benchmark.template.skl_model_transform A template to benchmark a model with asv. The benchmark can be run through file run_asv.sh on Linux or run_asv.bat on Windows. .. warning:: On Windows, avoid cloning the repository into a folder with a long full path. Visual Studio tends to enforce the maximum path length limit even when the system is configured to allow longer paths. source on GitHub
skl_model_transform_positive |
module mlprodict.asv_benchmark.template.skl_model_transform_positive A template to benchmark a model with asv. The benchmark can be run through file run_asv.sh on Linux or run_asv.bat on Windows. .. warning:: On Windows, avoid cloning the repository into a folder with a long full path. Visual Studio tends to enforce the maximum path length limit even when the system is configured to allow longer paths. source on GitHub
sklearn_helper |
module mlprodict.onnx_tools.optim.sklearn_helper Helpers to manipulate scikit-learn models. source on GitHub |
svm_converters |
module mlprodict.onnx_conv.sklconv.svm_converters Rewrites some of the converters implemented in sklearn-onnx. source on GitHub |
tests_helper |
module mlprodict.testing.test_utils.tests_helper Inspired from sklearn-onnx, handles two backends. source on GitHub |
text_plot |
module mlprodict.plotting.text_plot Text representations of graphs. source on GitHub |
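A minimal sketch, assuming onnx_simple_text_plot is one of the text representations exported by this module and accepts a ModelProto::

    import onnx
    from mlprodict.plotting.text_plot import onnx_simple_text_plot  # assumed name

    onx = onnx.load("model.onnx")             # placeholder model file
    print(onnx_simple_text_plot(onx))         # prints a text summary of the graph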
tf2onnx_helper |
module mlprodict.onnx_tools.exports.tf2onnx_helper Helpers to run examples created with function export2tf2onnx(). source on GitHub
tree_converters |
module mlprodict.onnx_conv.sklconv.tree_converters Rewrites some of the converters implemented in sklearn-onnx. source on GitHub |
type_object |
module mlprodict.onnxrt.type_object Type object. source on GitHub |
utils_backend |
module mlprodict.testing.test_utils.utils_backend Inspired from sklearn-onnx, handles two backends. source on GitHub |
utils_backend_common |
module mlprodict.testing.test_utils.utils_backend_common Inspired from sklearn-onnx, handles two backends. source on GitHub |
utils_backend_common_compare |
module mlprodict.testing.test_utils.utils_backend_common_compare Inspired from sklearn-onnx, handles two backends. source on GitHub |
utils_backend_onnxruntime |
module mlprodict.testing.test_utils.utils_backend_onnxruntime Inspired from sklearn-onnx, handles two backends. source on GitHub |
utils_backend_python |
module mlprodict.testing.test_utils.utils_backend_python Inspired from sklearn-onnx, handles two backends. source on GitHub |
validate |
module mlprodict.cli.validate Command line about validation of prediction runtime. source on GitHub |
validate |
module mlprodict.onnxrt.validate.validate Validates runtime for many scikit-learn operators. The submodule relies on onnxconverter_common, sklearn-onnx. source on GitHub
validate_benchmark |
module mlprodict.onnxrt.validate.validate_benchmark Measures time processing for ONNX models. source on GitHub |
validate_benchmark_replay |
module mlprodict.onnxrt.validate.validate_benchmark_replay Measures time processing for ONNX models. source on GitHub |
validate_difference |
module mlprodict.onnxrt.validate.validate_difference Validates runtime for many scikit-learn operators. The submodule relies on onnxconverter_common, sklearn-onnx. source on GitHub
validate_helper |
module mlprodict.onnxrt.validate.validate_helper Validates runtime for many scikit-learn operators. The submodule relies on onnxconverter_common, sklearn-onnx. source on GitHub |
validate_latency |
module mlprodict.onnxrt.validate.validate_latency Command line about validation of prediction runtime. source on GitHub |
validate_problems |
module mlprodict.onnxrt.validate.validate_problems Validates runtime for many scikit-learn operators. The submodule relies on onnxconverter_common, sklearn-onnx. source on GitHub
validate_python |
module mlprodict.onnxrt.validate.validate_python Helpers to validate python code. source on GitHub |
validate_scenarios |
module mlprodict.onnx_conv.validate_scenarios Scenario for additional converters. source on GitHub |
validate_scenarios |
module mlprodict.onnxrt.validate.validate_scenarios Scenarios for validation. source on GitHub |
validate_summary |
module mlprodict.onnxrt.validate.validate_summary Summarizes results produced by functions in validate.py. source on GitHub
verify_code |
module mlprodict.testing.verify_code Looks into the code and detects error before finalizing the benchmark. source on GitHub |
xop |
module mlprodict.npy.xop Xop API to build onnx graphs. Inspired from sklearn-onnx. .. versionadded:: 0.9 source on GitHub |
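A hedged sketch of the Xop API, assuming loadop returns operator classes whose instances assemble an ONNX graph through to_onnx (the class name, output_names and the dtype arguments are assumptions)::

    import numpy
    from mlprodict.npy.xop import loadop

    OnnxAdd = loadop('Add')                   # assumed factory
    node = OnnxAdd('X', numpy.array([1], dtype=numpy.float32),
                   output_names=['Y'])
    onx = node.to_onnx(numpy.float32, numpy.float32)
    print(onx)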
xop_auto |
module mlprodict.npy.xop_auto Automates the generation of the operators' documentation for the Xop API. .. versionadded:: 0.9 source on GitHub
xop_auto_import_ |
module mlprodict.npy.xop_auto_import_ Xop API. Importing this file takes time. It should be avoided. .. versionadded:: 0.9 source on GitHub |
xop_convert |
module mlprodict.npy.xop_convert Easier API to build onnx graphs. Inspired from skl2onnx. .. versionadded:: 0.9 source on GitHub |
xop_opset |
module mlprodict.npy.xop_opset Xop API to build onnx graphs. Inspired from sklearn-onnx. .. versionadded:: 0.9 source on GitHub |
xop_sphinx |
module mlprodict.npy.xop_sphinx Automates the generation of the operators' documentation for the Xop API. :: def setup(app): app.connect('builder-inited', generate_op_doc) .. versionadded:: 0.9 source on GitHub
xop_variable |
module mlprodict.npy.xop_variable Xop API to build onnx graphs. Inspired from sklearn-onnx. .. versionadded:: 0.9 source on GitHub |
zoo |
module mlprodict.tools.zoo Tools to test models from the ONNX Zoo. .. versionadded:: 0.6 source on GitHub |