module onnx_tools.onnx_export
#
Short summary#
module mlprodict.onnx_tools.onnx_export
Exports an ONNX graph in a way it can be created again with a python script. It relies on jinja2 and autopep8.
Functions#
function |
truncated documentation |
---|---|
Exports an ONNX model to the numpy syntax. The export does not work with all operators. |
|
Exports an ONNX model to the onnx syntax. |
|
Exports an ONNX model to the tensorflow-onnx syntax. |
|
Exports an ONNX model to the onnx syntax. |
|
Exports an ONNX model to the onnx syntax. |
|
Returns the list of the same attribute. [el.att for el in ens]. |
Documentation#
Exports an ONNX graph in a way it can be created again with a python script. It relies on jinja2 and autopep8.
New in version 0.7.
- mlprodict.onnx_tools.onnx_export._nodes(graph, rename_name, used, output_names, use_onnx_tensor, templates, verbose, opset, rename, autopep_options, name, subgraphs, unique_operators)#
- mlprodict.onnx_tools.onnx_export.export2numpy(model_onnx, opset=None, verbose=True, name=None, rename=False, autopep_options=None)#
Exports an ONNX model to the numpy syntax. The export does not work with all operators.
- Parameters
model_onnx – string or ONNX graph
opset – opset to export to (None to select the one from the graph)
verbose – inserts prints
name – to overwrite onnx name
rename – rename the names to get shorter names
autopep_options – autopep8 options
- Returns
python code
<<<
import numpy from sklearn.cluster import KMeans from mlprodict.onnx_conv import to_onnx from mlprodict.onnx_tools.onnx_export import export2numpy X = numpy.arange(20).reshape(10, 2).astype(numpy.float32) tr = KMeans(n_clusters=2) tr.fit(X) onx = to_onnx(tr, X, target_opset=14) code = export2numpy(onx) print(code)
>>>
import numpy import scipy.special as scipy_special import scipy.spatial.distance as scipy_distance from mlprodict.onnx_tools.exports.numpy_helper import ( argmax_use_numpy_select_last_index, argmin_use_numpy_select_last_index, array_feature_extrator, make_slice) def numpy_mlprodict_ONNX_KMeans(X): ''' Numpy function for ``mlprodict_ONNX_KMeans``. * producer: skl2onnx * version: 0 * description: ''' # initializers Ad_Addcst = numpy.array([41.0, 421.0], dtype=numpy.float32) Ge_Gemmcst = numpy.array([4.0, 5.0, 14.0, 15.0], dtype=numpy.float32).reshape((2, 2)) Mu_Mulcst = numpy.array([0.0], dtype=numpy.float32) # nodes Re_reduced0 = (X ** 2).sum(axis=1, keepdims=1) Mu_C0 = Re_reduced0 * Mu_Mulcst Ge_Y0 = X @ Ge_Gemmcst.T * -2.0 + Mu_C0 * 0.0 Ad_C01 = Re_reduced0 + Ge_Y0 Ad_C0 = Ad_Addcst + Ad_C01 label = argmin_use_numpy_select_last_index( Ad_C0, axis=1, keepdims=0, select_last_index=0) scores = Ad_C0 ** 0.5 return label, scores
This can be applied to the decomposition of an einsum equation into simple matrix operations.
<<<
import numpy from mlprodict.testing.einsum import decompose_einsum_equation from mlprodict.onnx_tools.onnx_export import export2numpy x1 = numpy.arange(8).reshape(2, 2, 2).astype(numpy.float32) x2 = numpy.arange(4).reshape(2, 2).astype(numpy.float32) r = numpy.einsum("bac,cd->ad", x1, x2) seq_clean = decompose_einsum_equation( "bac,cd->ad", strategy='numpy', clean=True) onx = seq_clean.to_onnx("Y", "X1", "X2", dtype=numpy.float32) code = export2numpy(onx, name="einsum") print(code)
>>>
import numpy import scipy.special as scipy_special import scipy.spatial.distance as scipy_distance from mlprodict.onnx_tools.exports.numpy_helper import ( argmax_use_numpy_select_last_index, argmin_use_numpy_select_last_index, array_feature_extrator, make_slice) def numpy_einsum(X1, X2): ''' Numpy function for ``einsum``. * producer: mlprodict * version: 0 * description: ''' # initializers einsum140165544669920_ba_batch_axes = numpy.array([0], dtype=numpy.int64) einsum140165544669920_ba_sum_axes = numpy.array([3], dtype=numpy.int64) einsum140165544669920_ba__01 = numpy.array([-1], dtype=numpy.int64) einsum140165544669920_ba_ones = numpy.array([1], dtype=numpy.int64) # nodes einsum140166443384016_id = X1 einsum140166443383872_ex = numpy.expand_dims( einsum140166443384016_id, axis=3) einsum140165545261328_tr = numpy.transpose( einsum140166443383872_ex, axes=(1, 0, 2, 3)) einsum140165545261472_re = einsum140165545261328_tr.sum(axis=1, keepdims=1) einsum140165545261424_id = X2 einsum140165545315344_ex = numpy.expand_dims( einsum140165545261424_id, axis=(0, 1)) einsum140165544669584_tr = numpy.transpose( einsum140165545261472_re, axes=(1, 0, 3, 2)) einsum140165544669680_tr = numpy.transpose( einsum140165545315344_ex, axes=(1, 0, 3, 2)) einsum140165544669920_ba_shape1 = numpy.array( einsum140165544669584_tr.shape, dtype=numpy.int64) einsum140165544669920_ba_shape2 = numpy.array( einsum140165544669680_tr.shape, dtype=numpy.int64) einsum140165544669920_ba_dim0g = numpy.take( einsum140165544669920_ba_shape1, einsum140165544669920_ba_batch_axes, axis=0) einsum140165544669920_ba_dim0bg = numpy.take( einsum140165544669920_ba_shape2, einsum140165544669920_ba_batch_axes, axis=0) einsum140165544669920_ba_dim1 = numpy.take( einsum140165544669920_ba_shape1, einsum140165544669920_ba_sum_axes, axis=0) einsum140165544669920_ba_dim2 = numpy.take( einsum140165544669920_ba_shape2, einsum140165544669920_ba_sum_axes, axis=0) einsum140165544669920_ba_resh1_11 = numpy.concatenate( 
[einsum140165544669920_ba__01, einsum140165544669920_ba_dim1], 0) einsum140165544669920_ba_resh2_11 = numpy.concatenate( [einsum140165544669920_ba__01, einsum140165544669920_ba_dim2], 0) einsum140165544669920_ba_aresh1 = einsum140165544669584_tr.reshape( tuple(einsum140165544669920_ba_resh1_11)) einsum140165544669920_ba_aresh2 = einsum140165544669680_tr.reshape( tuple(einsum140165544669920_ba_resh2_11)) einsum140165544669920_ba_gemm = einsum140165544669920_ba_aresh1 @ einsum140165544669920_ba_aresh2.T * 1.0 einsum140165544669920_ba_max_dim = numpy.maximum( einsum140165544669920_ba_dim0g, einsum140165544669920_ba_dim0bg) einsum140165544669920_ba_left_dim = numpy.take( einsum140165544669920_ba_shape1, [1], axis=0) einsum140165544669920_ba_right_dim = numpy.take( einsum140165544669920_ba_shape2, [2], axis=0) einsum140165544669920_ba_new_shape = numpy.concatenate( [einsum140165544669920_ba_max_dim, einsum140165544669920_ba_left_dim, einsum140165544669920_ba_right_dim, einsum140165544669920_ba_ones], 0) einsum140165544669920_ba_final = einsum140165544669920_ba_gemm.reshape( tuple(einsum140165544669920_ba_new_shape)) einsum140165544670208_tr = numpy.transpose( einsum140165544669920_ba_final, axes=(1, 0, 3, 2)) einsum140166443384064_sq = numpy.squeeze( einsum140165544670208_tr, axis=(1, 2)) einsum140165545316304_id = einsum140166443384064_sq Y = einsum140165545316304_id return Y
- mlprodict.onnx_tools.onnx_export.export2onnx(model_onnx, opset=None, verbose=True, name=None, rename=False, autopep_options=None)#
Exports an ONNX model to the onnx syntax.
- Parameters
model_onnx – string or ONNX graph
opset – opset to export to (None to select the one from the graph)
verbose – inserts prints
name – to overwrite onnx name
rename – rename the names to get shorter names
autopep_options – autopep8 options
- Returns
python code
The following example shows what a python code creating a graph implementing the KMeans would look like.
<<<
import numpy from sklearn.cluster import KMeans from mlprodict.onnx_conv import to_onnx from mlprodict.onnx_tools.onnx_export import export2onnx X = numpy.arange(20).reshape(10, 2).astype(numpy.float32) tr = KMeans(n_clusters=2) tr.fit(X) onx = to_onnx(tr, X, target_opset=14) code = export2onnx(onx) print(code)
>>>
import numpy from onnx import numpy_helper, TensorProto from onnx.helper import ( make_model, make_node, set_model_props, make_tensor, make_graph, make_tensor_value_info, make_opsetid, make_function) def create_model(): ''' Converted ``mlprodict_ONNX_KMeans``. * producer: skl2onnx * version: 0 * description: ''' # subgraphs # containers print('[containers]') # verbose initializers = [] nodes = [] inputs = [] outputs = [] functions = [] # opsets print('[opsets]') # verbose opsets = {'': 14} target_opset = 14 # subgraphs print('[subgraphs]') # verbose # initializers print('[initializers]') # verbose tensor = numpy_helper.from_array(numpy.array( [41.0, 421.0], dtype=numpy.float32), name='Ad_Addcst') initializers.append(tensor) tensor = numpy_helper.from_array(numpy.array( [4.0, 5.0, 14.0, 15.0], dtype=numpy.float32).reshape((2, 2)), name='Ge_Gemmcst') initializers.append(tensor) tensor = numpy_helper.from_array(numpy.array( [0.0], dtype=numpy.float32), name='Mu_Mulcst') initializers.append(tensor) # inputs print('[inputs]') # verbose inputs.append(make_tensor_value_info('X', 1, [None, 2])) # outputs print('[outputs]') # verbose outputs.append(make_tensor_value_info('label', 7, [None])) outputs.append(make_tensor_value_info('scores', 1, [None, 2])) # nodes print('[nodes]') # verbose node = make_node( 'ReduceSumSquare', ['X'], ['Re_reduced0'], name='Re_ReduceSumSquare', axes=[1], keepdims=1, domain='') nodes.append(node) node = make_node( 'Mul', ['Re_reduced0', 'Mu_Mulcst'], ['Mu_C0'], name='Mu_Mul', domain='') nodes.append(node) node = make_node( 'Gemm', ['X', 'Ge_Gemmcst', 'Mu_C0'], ['Ge_Y0'], name='Ge_Gemm', alpha=-2.0, transB=1, domain='') nodes.append(node) node = make_node( 'Add', ['Re_reduced0', 'Ge_Y0'], ['Ad_C01'], name='Ad_Add', domain='') nodes.append(node) node = make_node( 'Add', ['Ad_Addcst', 'Ad_C01'], ['Ad_C0'], name='Ad_Add1', domain='') nodes.append(node) node = make_node( 'ArgMin', ['Ad_C0'], ['label'], name='Ar_ArgMin', axis=1, keepdims=0, domain='') 
nodes.append(node) node = make_node( 'Sqrt', ['Ad_C0'], ['scores'], name='Sq_Sqrt', domain='') nodes.append(node) # opsets print('[opset]') # verbose opset_imports = [make_opsetid(domain, 1 if version is None else version) for domain, version in opsets.items()] # graph print('[graph]') # verbose graph = make_graph(nodes, 'mlprodict_ONNX_KMeans', inputs, outputs, initializers) # '7' onnx_model = make_model( graph, opset_imports=opset_imports, functions=functions) onnx_model.ir_version = 7 onnx_model.producer_name = 'skl2onnx' onnx_model.producer_version = '' onnx_model.domain = 'ai.onnx' onnx_model.model_version = 0 onnx_model.doc_string = '' set_model_props(onnx_model, {}) return onnx_model onnx_model = create_model()
- mlprodict.onnx_tools.onnx_export.export2tf2onnx(model_onnx, opset=None, verbose=True, name=None, rename=False, autopep_options=None)#
Exports an ONNX model to the tensorflow-onnx syntax.
- Parameters
model_onnx – string or ONNX graph
opset – opset to export to (None to select the one from the graph)
verbose – inserts prints
name – to overwrite onnx name
rename – rename the names to get shorter names
autopep_options – autopep8 options
- Returns
python code
<<<
import numpy from sklearn.cluster import KMeans from mlprodict.onnx_conv import to_onnx from mlprodict.onnx_tools.onnx_export import export2tf2onnx X = numpy.arange(20).reshape(10, 2).astype(numpy.float32) tr = KMeans(n_clusters=2) tr.fit(X) onx = to_onnx(tr, X, target_opset=14) code = export2tf2onnx(onx) print(code)
>>>
import inspect import collections import numpy from onnx import AttributeProto, TensorProto from onnx.helper import ( make_model, make_node, set_model_props, make_tensor, make_graph, make_tensor_value_info) # from tf2onnx.utils import make_name, make_sure, map_onnx_to_numpy_type from mlprodict.onnx_tools.exports.tf2onnx_helper import ( make_name, make_sure, map_onnx_to_numpy_type) # from tf2onnx.handler import tf_op # from tf2onnx.graph_builder import GraphBuilder from mlprodict.onnx_tools.exports.tf2onnx_helper import ( tf_op, Tf2OnnxConvert, GraphBuilder) @tf_op("mlprodict_ONNX_KMeans") class Convertmlprodict_ONNX_KMeansOp: supported_dtypes = [ numpy.float32, ] @classmethod def any_version(cls, opset, ctx, node, **kwargs): ''' Converter for ``mlprodict_ONNX_KMeans``. * producer: skl2onnx * version: 0 * description: ''' oldnode = node input_name = node.input[0] onnx_dtype = ctx.get_dtype(input_name) np_dtype = map_onnx_to_numpy_type(onnx_dtype) make_sure(np_dtype in Convertmlprodict_ONNX_KMeansOp.supported_dtypes, "Unsupported input type.") shape = ctx.get_shape(input_name) varx = {x: x for x in node.input} # initializers if getattr(ctx, 'verbose', False): print('[initializers] %r' % cls) value = numpy.array([41.0, 421.0], dtype=numpy.float32) varx['Ad_Addcst'] = ctx.make_const( name=make_name('init_Ad_Addcst'), np_val=value).name value = numpy.array([4.0, 5.0, 14.0, 15.0], dtype=numpy.float32).reshape((2, 2)) varx['Ge_Gemmcst'] = ctx.make_const( name=make_name('init_Ge_Gemmcst'), np_val=value).name value = numpy.array([0.0], dtype=numpy.float32) varx['Mu_Mulcst'] = ctx.make_const( name=make_name('init_Mu_Mulcst'), np_val=value).name # nodes if getattr(ctx, 'verbose', False): print('[nodes] %r' % cls) inputs = [varx['X']] node = ctx.make_node('ReduceSumSquare', inputs=inputs, attr=dict( axes=[1], keepdims=1), name=make_name('Re_ReduceSumSquare')) varx['Re_reduced0'] = node.output[0] inputs = [varx['Re_reduced0'], varx['Mu_Mulcst']] node = ctx.make_node('Mul', 
inputs=inputs, name=make_name('Mu_Mul')) varx['Mu_C0'] = node.output[0] inputs = [varx['X'], varx['Ge_Gemmcst'], varx['Mu_C0']] node = ctx.make_node('Gemm', inputs=inputs, attr=dict( alpha=-2.0, transB=1), name=make_name('Ge_Gemm')) varx['Ge_Y0'] = node.output[0] inputs = [varx['Re_reduced0'], varx['Ge_Y0']] node = ctx.make_node('Add', inputs=inputs, name=make_name('Ad_Add')) varx['Ad_C01'] = node.output[0] inputs = [varx['Ad_Addcst'], varx['Ad_C01']] node = ctx.make_node('Add', inputs=inputs, name=make_name('Ad_Add1')) varx['Ad_C0'] = node.output[0] inputs = [varx['Ad_C0']] node = ctx.make_node('ArgMin', inputs=inputs, attr=dict( axis=1, keepdims=0), name=make_name('Ar_ArgMin')) varx['label'] = node.output[0] inputs = [varx['Ad_C0']] node = ctx.make_node('Sqrt', inputs=inputs, name=make_name('Sq_Sqrt')) varx['scores'] = node.output[0] # finalize if getattr(ctx, 'verbose', False): print('[replace_all_inputs] %r' % cls) ctx.replace_all_inputs(oldnode.output[0], node.output[0]) ctx.remove_node(oldnode.name) @classmethod def version_13(cls, ctx, node, **kwargs): return cls.any_version(13, ctx, node, **kwargs) def create_model(): inputs = [] outputs = [] # inputs print('[inputs]') # verbose value = make_tensor_value_info('X', 1, [None, 2]) inputs.append(value) # outputs print('[outputs]') # verbose value = make_tensor_value_info('label', 7, [None]) outputs.append(value) value = make_tensor_value_info('scores', 1, [None, 2]) outputs.append(value) inames = [i.name for i in inputs] onames = [i.name for i in outputs] node = make_node('mlprodict_ONNX_KMeans', inames, onames, name='mlprodict_ONNX_KMeans') # graph print('[graph]') # verbose graph = make_graph([node], 'mlprodict_ONNX_KMeans', inputs, outputs) onnx_model = make_model(graph) onnx_model.ir_version = 7 onnx_model.producer_name = 'skl2onnx' onnx_model.producer_version = '' onnx_model.domain = 'ai.onnx' onnx_model.model_version = 0 onnx_model.doc_string = '' set_model_props(onnx_model, {}) # opsets print('[opset]') 
# verbose opsets = {'': 14} del onnx_model.opset_import[:] # pylint: disable=E1101 for dom, value in opsets.items(): op_set = onnx_model.opset_import.add() op_set.domain = dom op_set.version = value return onnx_model onnx_raw = create_model() onnx_model = Tf2OnnxConvert( onnx_raw, tf_op, target_opset={'': 14}, verbose=True).run()
- mlprodict.onnx_tools.onnx_export.export2xop(model_onnx, opset=None, verbose=True, name=None, rename=False, autopep_options=None)#
Exports an ONNX model to the onnx syntax.
- Parameters
model_onnx – string or ONNX graph
opset – opset to export to (None to select the one from the graph)
verbose – inserts prints
name – to overwrite onnx name
rename – rename the names to get shorter names
autopep_options – autopep8 options
- Returns
python code
The following example shows what a python code creating a graph implementing the KMeans would look like.
<<<
import numpy from sklearn.cluster import KMeans from mlprodict.onnx_conv import to_onnx from mlprodict.onnx_tools.onnx_export import export2xop X = numpy.arange(20).reshape(10, 2).astype(numpy.float32) tr = KMeans(n_clusters=2) tr.fit(X) onx = to_onnx(tr, X, target_opset=14) code = export2xop(onx) print(code)
>>>
import numpy from onnx import TensorProto from onnx.helper import make_tensor from mlprodict.npy.xop_variable import Variable from mlprodict.npy.xop import loadop, OnnxOperatorFunction def create_model(): ''' Converted ``mlprodict_ONNX_KMeans``. * producer: skl2onnx * version: 0 * description: ''' print('[operators]') # verbose OnnxConstant = loadop('Constant') OnnxIdentity = loadop('Identity') OnnxAdd = loadop('Add') OnnxArgMin = loadop('ArgMin') OnnxGemm = loadop('Gemm') OnnxMul = loadop('Mul') OnnxReduceSumSquare = loadop('ReduceSumSquare') OnnxSqrt = loadop('Sqrt') # inputs print('[inputs]') # verbose var_inputs = [] X = 'X' var_inputs.append(Variable(X, numpy.float32, [None, 2])) # outputs print('[outputs]') # verbose var_outputs = [] var_outputs.append(Variable('label', numpy.int64, [None]))var_outputs.append(Variable('scores', numpy.float32, [None, 2])) # subgraphs # containers print('[containers]') # verbose # opsets print('[opsets]') # verbose opsets = {'': 14} target_opset = 14 # subgraphs print('[subgraphs]') # verbose # initializers print('[initializers]') # verbose Ad_Addcst = OnnxIdentity(numpy.array( [421.0, 41.0], dtype=numpy.float32), op_version=14) Ge_Gemmcst = OnnxIdentity(numpy.array( [[14.0, 15.0], [4.0, 5.0]], dtype=numpy.float32).reshape((2, 2)), op_version=14) Mu_Mulcst = OnnxIdentity(numpy.array( [0.0], dtype=numpy.float32), op_version=14) # nodes print('[nodes]') # verbose Re_reduced0 = OnnxReduceSumSquare(X, axes=[1], keepdims=1, op_version=14) Mu_C0 = OnnxMul(Re_reduced0, Mu_Mulcst, op_version=14) Ge_Y0 = OnnxGemm(X, Ge_Gemmcst, Mu_C0, alpha=-2.0, transB=1, op_version=14) Ad_C01 = OnnxAdd(Re_reduced0, Ge_Y0, op_version=14) Ad_C0 = OnnxAdd(Ad_Addcst, Ad_C01, op_version=14) label = OnnxArgMin(Ad_C0, axis=1, keepdims=0, output_names=['label'], op_version=14) scores = OnnxSqrt(Ad_C0, output_names=['scores'], op_version=14) # graph return label.to_onnx( target_opset={'': 14}, inputs=var_inputs, outputs=var_outputs, other_outputs=[scores]) 
onnx_model = create_model()
- mlprodict.onnx_tools.onnx_export.export_template(model_onnx, templates, opset=None, verbose=True, name=None, rename=False, use_onnx_tensor=False, autopep_options=None, function_name='create_model')#
Exports an ONNX model to the onnx syntax.
- Parameters
model_onnx – string or ONNX graph
templates – exporting templates
opset – opset to export to (None to select the one from the graph)
verbose – inserts prints
name – to overwrite onnx name
rename – rename the names to get shorter names
use_onnx_tensor – when an attribute is an array and its name is ‘value’, it converts that array into an ONNX tensor to avoid type mismatch, (operator ConstantOfShape, …)
autopep_options – autopep8 options
function_name – main function name in the code
- Returns
python code
- mlprodict.onnx_tools.onnx_export.select_attribute(ens, att, sort=False, unique=False, skip=None)#
Returns the list of the same attribute. [el.att for el in ens].
- Parameters
ens – list
att – attribute name
sort – sort the array
unique – returns the unique values
skip – to skip some names
- Returns
something like [el.att for el in ens]