Coverage for mlprodict/onnx_conv/onnx_ops/onnx_gradient_op.py: 100%
1"""
2@file
3@brief Custom operators for gradient numbers.
4"""
5from skl2onnx.algebra.onnx_operator import OnnxOperator


class OnnxYieldOp_1(OnnxOperator):
    """
    Defines a custom operator for YieldOp.
    """

    since_version = 1
    expected_inputs = [('X', 'T')]
    expected_outputs = [('Y', 'T')]
    input_range = [1, 1]
    output_range = [1, 1]
    is_deprecated = False
    domain = 'com.microsoft'
    operator_name = 'YieldOp'
    past_version = {}

    def __init__(self, X, non_differentiable_outputs=None,
                 full_shape_outputs=None, op_version=None, **kwargs):
        """
        :param X: array or OnnxOperatorMixin
        :param non_differentiable_outputs: the indices of the module
            outputs that don't have a gradient
        :param full_shape_outputs: the indices of the module outputs
            that must have full shape
        :param op_version: opset version
        :param kwargs: additional parameters
        """
        OnnxOperator.__init__(
            self, X, op_version=op_version, **kwargs)
        self.non_differentiable_outputs = non_differentiable_outputs
        self.full_shape_outputs = full_shape_outputs


OnnxYieldOp = OnnxYieldOp_1
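

# Added usage sketch (not part of the original module): how a YieldOp node
# could be built with the skl2onnx algebra API. YieldOp belongs to the
# com.microsoft domain used by onnxruntime-training, so the resulting graph
# only runs on a training-enabled onnxruntime build; the function is
# illustrative only.
def _example_yield_op():
    import numpy
    x = numpy.array([[0., 1.], [2., 3.]], dtype=numpy.float32)
    node = OnnxYieldOp('X', output_names=['Y'])
    # to_onnx is inherited from OnnxOperator and builds the ONNX graph,
    # inferring the input type from the sample array.
    return node.to_onnx({'X': x})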


class OnnxBroadcastGradientArgs_1(OnnxOperator):
    """
    Defines a custom operator for BroadcastGradientArgs.
    Returns the reduction axes for computing the gradients of
    ``s0 op s1`` with broadcast. The output axes are determined from
    last to first. The output is an empty vector when no reduction is
    necessary for the corresponding input.
    """

    since_version = 1
    expected_inputs = [('a_shape', 'T'), ('b_shape', 'T')]
    expected_outputs = [('a_axes', 'T'), ('b_axes', 'T')]
    input_range = [2, 2]
    output_range = [2, 2]
    is_deprecated = False
    domain = 'com.microsoft'
    operator_name = 'BroadcastGradientArgs'
    past_version = {}

    def __init__(self, a_shape, b_shape, op_version=None, **kwargs):
        """
        :param a_shape: the first input shape as a tensor
        :param b_shape: the second input shape as a tensor
        :param op_version: opset version
        :param kwargs: additional parameters
        """
        OnnxOperator.__init__(
            self, a_shape, b_shape, op_version=op_version, **kwargs)


OnnxBroadcastGradientArgs = OnnxBroadcastGradientArgs_1
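

# Added usage sketch (not part of the original module): for
# a_shape=(2, 3, 4) and b_shape=(3, 1), broadcasting b up to a means the
# gradient of b has to be reduced over axes 0 and 2, while a needs no
# reduction; the exact ordering of the returned axes follows the
# operator's last-to-first convention.
def _example_broadcast_gradient_args():
    import numpy
    a_shape = numpy.array([2, 3, 4], dtype=numpy.int64)
    b_shape = numpy.array([3, 1], dtype=numpy.int64)
    node = OnnxBroadcastGradientArgs(
        'a_shape', 'b_shape', output_names=['a_axes', 'b_axes'])
    return node.to_onnx({'a_shape': a_shape, 'b_shape': b_shape})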


class OnnxFusedMatMul_1(OnnxOperator):
    """
    MatMul with optional transposition of its inputs,
    equivalent to Gemm without the C input.
    """

    since_version = 1
    expected_inputs = [('X', 'T'), ('Y', 'T')]
    expected_outputs = [('Z', 'T')]
    input_range = [2, 2]
    output_range = [1, 1]
    is_deprecated = False
    domain = 'com.microsoft'
    operator_name = 'FusedMatMul'
    past_version = {}

    def __init__(self, X, Y, transA=0, transB=0,
                 op_version=None, **kwargs):
        """
        :param X: first matrix
        :param Y: second matrix
        :param transA: transpose the first matrix
        :param transB: transpose the second matrix
        :param op_version: opset version
        :param kwargs: additional parameters
        """
        OnnxOperator.__init__(
            self, X, Y, transA=transA, transB=transB,
            op_version=op_version, **kwargs)


OnnxFusedMatMul = OnnxFusedMatMul_1
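

# Added usage sketch (not part of the original module): FusedMatMul
# multiplies the two inputs after optionally transposing them, so with
# transA=0 and transB=0 it should match the plain matrix product x @ y,
# of shape (3, 5) here. Executing the graph requires an onnxruntime build
# that ships the com.microsoft contrib operators.
def _example_fused_matmul():
    import numpy
    x = numpy.random.randn(3, 4).astype(numpy.float32)
    y = numpy.random.randn(4, 5).astype(numpy.float32)
    node = OnnxFusedMatMul('X', 'Y', transA=0, transB=0,
                           output_names=['Z'])
    return node.to_onnx({'X': x, 'Y': y})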


class OnnxSoftmaxGrad_13(OnnxOperator):
    """
    Gradient of Softmax.
    SoftmaxGrad computes :math:`Y * (dY - ReduceSum(Y * dY))`.
    ONNX does not have a dot product, but it can be simulated with a
    pointwise multiplication ("Mul") followed by a "ReduceSum".
    Unfortunately, the treatment of "axis" differs between "SoftmaxGrad"
    and "ReduceSum": if axis=k for "SoftmaxGrad", the reduction axes for
    "ReduceSum" must be [k, ..., n-1], after accounting for negative-axis
    specification. An alternative would be to flatten the inputs to 2D
    and reshape the output back to the original shape; many of these
    extra ops can hopefully be optimized away in the common case of
    statically known shapes.
    """

    since_version = 1
    expected_inputs = [('grad', 'T'), ('prob', 'T')]
    expected_outputs = [('Y', 'T')]
    input_range = [2, 2]
    output_range = [1, 1]
    is_deprecated = False
    domain = 'com.microsoft'
    operator_name = 'SoftmaxGrad_13'
    past_version = {}

    def __init__(self, grad, prob, op_version=None, **kwargs):
        """
        :param grad: gradient
        :param prob: probabilities
        :param op_version: opset version
        :param kwargs: additional parameters
        """
        OnnxOperator.__init__(
            self, grad, prob, op_version=op_version, **kwargs)


OnnxSoftmaxGrad = OnnxSoftmaxGrad_13
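

# Added worked example (not part of the original module): the formula
# Y * (dY - ReduceSum(Y * dY)) written with numpy for a reduction over the
# last axis, i.e. what the operator computes for axis=-1.
def _example_softmax_grad_formula():
    import numpy
    prob = numpy.array([[0.2, 0.3, 0.5]], dtype=numpy.float32)  # Y, softmax output
    grad = numpy.array([[1., 0., 0.]], dtype=numpy.float32)  # dY, incoming gradient
    red = (prob * grad).sum(axis=-1, keepdims=True)  # ReduceSum(Y * dY)
    return prob * (grad - red)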