Compares implementations of Transpose#

This example compares numpy.transpose to the onnxruntime implementation. If available, tensorflow and pytorch are included as well.

Available optimisation#

The code shows which parallelisation optimisation could be used, AVX or SSE, and the number of available processors. Both numpy and torch have lazy implementations: the function switches dimensions and strides but does not move any data. That is why the function contiguous is called in both cases.

import numpy
import pandas
import matplotlib.pyplot as plt
from onnxruntime import InferenceSession
from skl2onnx.common.data_types import FloatTensorType
from skl2onnx.algebra.onnx_ops import OnnxTranspose
from cpyquickhelper.numbers import measure_time
from tqdm import tqdm
from mlprodict.testing.experimental_c_impl.experimental_c import code_optimisation
# Shows the available SIMD optimisation (AVX or SSE) and the number
# of processors OpenMP can use.
print(code_optimisation())

Out:

AVX-omp=8

Transpose implementations#

Function einsum is used from pytorch instead of transpose, tensorflow uses transpose directly. The equation reflects the required transposition.

# tensorflow and pytorch are optional: the benchmark still runs without
# them, the corresponding curves are simply skipped.
try:
    from tensorflow import transpose as tf_transpose, convert_to_tensor
except ImportError:
    tf_transpose = None
try:
    from torch import einsum as torch_einsum, from_numpy
except ImportError:
    torch_einsum = None


def build_ort_transpose(perm, op_version=12):
    """Builds an ONNX graph holding a single Transpose node and returns
    a callable running it with onnxruntime.

    The returned callable takes ``(x, y)`` and ignores *y*; the second
    argument only exists so that it shares the same signature as the
    other benchmarked implementations.
    """
    transpose_node = OnnxTranspose(
        'x', perm=perm, op_version=op_version, output_names=['z'])
    model = transpose_node.to_onnx(
        inputs=[('x', FloatTensorType())], target_opset=op_version)
    session = InferenceSession(model.SerializeToString())

    def run(x, y):
        return session.run(None, {'x': x})

    return run


def loop_fct(fct, xs, ys):
    """Applies *fct* to every pair taken from *xs* and *ys* in order,
    discarding the results (only the execution time matters)."""
    for pair in zip(xs, ys):
        fct(*pair)


def perm2eq(perm):
    """Returns the einsum equation equivalent to a permutation of axes,
    e.g. ``(1, 0, 2, 3)`` gives ``'abcd->bacd'``."""
    inp = "".join(chr(97 + axis) for axis in range(len(perm)))
    out = "".join(inp[axis] for axis in perm)
    return "%s->%s" % (inp, out)


def benchmark_op(perm, repeat=5, number=5, name="Transpose", shape_fct=None):
    """Benchmarks one transposition over several input sizes.

    Compares numpy, onnxruntime and, when the packages could be
    imported, tensorflow and pytorch.

    :param perm: permutation of the axes to benchmark
    :param repeat: number of measure repetitions (see *measure_time*)
    :param number: number of executions per measure
    :param name: title prefix for the graphs
    :param shape_fct: builds the input shape from the varying dimension
        *dim*, defaults to ``lambda dim: (3, dim, 1, 512)``
    :return: tuple ``(df, rs, ax)`` with the raw measures, the speedup
        against numpy and the matplotlib axes
    """
    if shape_fct is None:
        def shape_fct(dim): return (3, dim, 1, 512)
    ort_fct = build_ort_transpose(perm)
    res = []
    for dim in tqdm([8, 16, 32, 64, 100, 128, 200,
                     256, 400, 512, 1024]):
        shape = shape_fct(dim)
        # fewer arrays for the largest sizes to keep the benchmark short
        n_arrays = 10 if dim < 512 else 4
        xs = [numpy.random.rand(*shape).astype(numpy.float32)
              for _ in range(n_arrays)]
        ys = [perm for _ in range(n_arrays)]
        equation = perm2eq(perm)
        info = dict(perm=perm, shape=shape)

        # numpy: ascontiguousarray forces the data to move,
        # numpy.transpose alone only modifies the strides.
        ctx = dict(
            xs=xs, ys=ys,
            fct=lambda x, y: numpy.ascontiguousarray(numpy.transpose(x, y)),
            loop_fct=loop_fct)
        obs = measure_time(
            "loop_fct(fct, xs, ys)",
            div_by_number=True, context=ctx, repeat=repeat, number=number)
        obs['dim'] = dim
        obs['fct'] = 'numpy'
        obs.update(info)
        res.append(obs)

        # onnxruntime
        ctx['fct'] = ort_fct
        obs = measure_time(
            "loop_fct(fct, xs, ys)",
            div_by_number=True, context=ctx, repeat=repeat, number=number)
        obs['dim'] = dim
        obs['fct'] = 'ort'
        obs.update(info)
        res.append(obs)

        if tf_transpose is not None:
            # tensorflow, conversions to tensors done outside the measure
            ctx['fct'] = tf_transpose
            ctx['xs'] = [convert_to_tensor(x) for x in xs]
            ctx['ys'] = [convert_to_tensor(y) for y in ys]
            obs = measure_time(
                "loop_fct(fct, xs, ys)",
                div_by_number=True, context=ctx, repeat=repeat, number=number)
            obs['dim'] = dim
            obs['fct'] = 'tf'
            obs.update(info)
            res.append(obs)

            # tensorflow, including the copy from and back to numpy;
            # the permutation *y* must be forwarded, otherwise
            # tf.transpose reverses all axes instead of applying *perm*
            ctx['fct'] = lambda x, y: tf_transpose(
                convert_to_tensor(x), y).numpy()
            ctx['xs'] = xs
            ctx['ys'] = ys
            obs = measure_time(
                "loop_fct(fct, xs, ys)",
                div_by_number=True, context=ctx, repeat=repeat, number=number)
            obs['dim'] = dim
            obs['fct'] = 'tf_copy'
            obs.update(info)
            res.append(obs)

        if torch_einsum is not None:
            # torch: einsum is lazy, contiguous forces the copy
            ctx['fct'] = lambda x, y: torch_einsum(equation, x).contiguous()
            ctx['xs'] = [from_numpy(x) for x in xs]
            ctx['ys'] = ys  # the permutation is encoded in *equation*
            obs = measure_time(
                "loop_fct(fct, xs, ys)",
                div_by_number=True, context=ctx, repeat=repeat, number=number)
            obs['dim'] = dim
            obs['fct'] = 'torch'
            obs.update(info)
            res.append(obs)

    # Dataframes
    # *shape* and *dim* keep the values of the last iteration; str(dim)
    # is replaced by 'N' to produce a generic shape label for titles.
    shape_name = str(shape).replace(str(dim), "N")
    df = pandas.DataFrame(res)
    df.columns = [_.replace('dim', 'N') for _ in df.columns]
    # keyword arguments: positional arguments for DataFrame.pivot
    # were removed in pandas 2.0
    piv = df.pivot(index='N', columns='fct', values='average')

    # speedup against numpy (numpy itself becomes the 1.0 baseline)
    rs = piv.copy()
    for c in ['ort', 'torch', 'tf', 'tf_copy']:
        if c in rs.columns:
            rs[c] = rs['numpy'] / rs[c]
    rs['numpy'] = 1.

    # Graphs: raw times on the left, speedups on the right.
    fig, ax = plt.subplots(1, 2, figsize=(12, 4))
    piv.plot(logx=True, logy=True, ax=ax[0],
             title="%s benchmark\n%r - %r - %s"
                   " lower better" % (name, shape_name, perm, equation))
    ax[0].legend(prop={"size": 9})
    rs.plot(logx=True, logy=True, ax=ax[1],
            title="%s Speedup, baseline=numpy\n%r - %r - %s"
                  " higher better" % (name, shape_name, perm, equation))
    # dashed guides at 0.5x and 2x speedup
    ax[1].plot([min(rs.index), max(rs.index)], [0.5, 0.5], 'g--')
    ax[1].plot([min(rs.index), max(rs.index)], [2., 2.], 'g--')
    ax[1].legend(prop={"size": 9})
    return df, rs, ax


dfs = []  # collects the raw measures of every benchmark to save them at the end

First permutation: (1, 0, 2, 3)#

perm = (1, 0, 2, 3)
df, piv, ax = benchmark_op(perm)
dfs.append(df)
# keyword arguments: positional arguments for DataFrame.pivot
# were removed in pandas 2.0
df.pivot(index="fct", columns="N", values="average")
Transpose benchmark '(3, N, 1, 512)' - (1, 0, 2, 3) - abcd->bacd lower better, Transpose Speedup, baseline=numpy '(3, N, 1, 512)' - (1, 0, 2, 3) - abcd->bacd higher better

Out:

  0%|          | 0/11 [00:00<?, ?it/s]
  9%|9         | 1/11 [00:00<00:02,  3.67it/s]
 27%|##7       | 3/11 [00:01<00:03,  2.36it/s]
 36%|###6      | 4/11 [00:02<00:05,  1.24it/s]
 45%|####5     | 5/11 [00:05<00:07,  1.30s/it]
 55%|#####4    | 6/11 [00:07<00:08,  1.65s/it]
 64%|######3   | 7/11 [00:10<00:07,  1.98s/it]
 73%|#######2  | 8/11 [00:11<00:05,  1.93s/it]
 82%|########1 | 9/11 [00:14<00:04,  2.16s/it]
 91%|######### | 10/11 [00:15<00:01,  1.84s/it]
100%|##########| 11/11 [00:17<00:00,  1.86s/it]
100%|##########| 11/11 [00:17<00:00,  1.61s/it]
N 8 16 32 64 100 128 200 256 400 512 1024
fct
numpy 0.000359 0.000508 0.000921 0.001800 0.003248 0.003787 0.006294 0.008063 0.011554 0.006217 0.011421
ort 0.000859 0.001080 0.001995 0.004219 0.006624 0.008226 0.010319 0.012887 0.020097 0.009651 0.018383
torch 0.009379 0.001059 0.031233 0.053753 0.079955 0.079923 0.087090 0.046803 0.067508 0.023989 0.038561


Second permutation: (1, 0, 3, 2)#

perm = (1, 0, 3, 2)
df, piv, ax = benchmark_op(perm)
dfs.append(df)
# keyword arguments: positional arguments for DataFrame.pivot
# were removed in pandas 2.0
df.pivot(index="fct", columns="N", values="average")
Transpose benchmark '(3, N, 1, 512)' - (1, 0, 3, 2) - abcd->badc lower better, Transpose Speedup, baseline=numpy '(3, N, 1, 512)' - (1, 0, 3, 2) - abcd->badc higher better

Out:

  0%|          | 0/11 [00:00<?, ?it/s]
 18%|#8        | 2/11 [00:00<00:00,  9.50it/s]
 27%|##7       | 3/11 [00:01<00:05,  1.56it/s]
 36%|###6      | 4/11 [00:03<00:07,  1.07s/it]
 45%|####5     | 5/11 [00:05<00:08,  1.42s/it]
 55%|#####4    | 6/11 [00:07<00:08,  1.75s/it]
 64%|######3   | 7/11 [00:09<00:07,  1.81s/it]
 73%|#######2  | 8/11 [00:12<00:06,  2.10s/it]
 82%|########1 | 9/11 [00:16<00:05,  2.58s/it]
 91%|######### | 10/11 [00:18<00:02,  2.37s/it]
100%|##########| 11/11 [00:21<00:00,  2.59s/it]
100%|##########| 11/11 [00:21<00:00,  1.93s/it]
N 8 16 32 64 100 128 200 256 400 512 1024
fct
numpy 0.000349 0.000528 0.000870 0.001805 0.003118 0.004395 0.006017 0.008103 0.012479 0.006000 0.012535
ort 0.001808 0.002958 0.005557 0.011399 0.018050 0.022134 0.032399 0.041075 0.062756 0.031924 0.062660
torch 0.000944 0.001057 0.048489 0.056353 0.060512 0.066378 0.035463 0.054727 0.061531 0.033965 0.040117


Third permutation: (0, 2, 1, 3)#

This transposition is equivalent to a reshape because it only moves the empty axis. The comparison is not entirely fair as the cost for onnxruntime includes a copy from numpy to onnxruntime, a reshape = another copy, then a copy back to numpy. Tensorflow and pytorch seem to have a lazy implementation in this case.

perm = (0, 2, 1, 3)
df, piv, ax = benchmark_op(perm)
dfs.append(df)
# keyword arguments: positional arguments for DataFrame.pivot
# were removed in pandas 2.0
df.pivot(index="fct", columns="N", values="average")
Transpose benchmark '(3, N, 1, 512)' - (0, 2, 1, 3) - abcd->acbd lower better, Transpose Speedup, baseline=numpy '(3, N, 1, 512)' - (0, 2, 1, 3) - abcd->acbd higher better

Out:

  0%|          | 0/11 [00:00<?, ?it/s]
 27%|##7       | 3/11 [00:00<00:00, 17.19it/s]
 45%|####5     | 5/11 [00:00<00:00,  8.13it/s]
 64%|######3   | 7/11 [00:01<00:00,  5.14it/s]
 73%|#######2  | 8/11 [00:01<00:00,  4.11it/s]
 82%|########1 | 9/11 [00:02<00:00,  3.03it/s]
 91%|######### | 10/11 [00:02<00:00,  3.10it/s]
100%|##########| 11/11 [00:03<00:00,  2.58it/s]
100%|##########| 11/11 [00:03<00:00,  3.64it/s]
N 8 16 32 64 100 128 200 256 400 512 1024
fct
numpy 0.000112 0.000114 0.000114 0.000113 0.000112 0.000117 0.000114 0.000114 0.000112 0.000051 0.000049
ort 0.000808 0.001096 0.001620 0.003903 0.006441 0.007058 0.008484 0.010349 0.015055 0.007431 0.013975
torch 0.000525 0.000516 0.000514 0.000513 0.000516 0.000513 0.000513 0.000513 0.000516 0.000209 0.000209


Fourth permutation: (3, 1, 2, 0)#

perm = (3, 1, 2, 0)
df, piv, ax = benchmark_op(perm)
dfs.append(df)
# keyword arguments: positional arguments for DataFrame.pivot
# were removed in pandas 2.0
df.pivot(index="fct", columns="N", values="average")
Transpose benchmark '(3, N, 1, 512)' - (3, 1, 2, 0) - abcd->dbca lower better, Transpose Speedup, baseline=numpy '(3, N, 1, 512)' - (3, 1, 2, 0) - abcd->dbca higher better

Out:

  0%|          | 0/11 [00:00<?, ?it/s]
  9%|9         | 1/11 [00:00<00:01,  6.18it/s]
 18%|#8        | 2/11 [00:00<00:01,  4.52it/s]
 27%|##7       | 3/11 [00:01<00:06,  1.25it/s]
 36%|###6      | 4/11 [00:04<00:10,  1.48s/it]
 45%|####5     | 5/11 [00:07<00:13,  2.18s/it]
 55%|#####4    | 6/11 [00:12<00:14,  2.85s/it]
 64%|######3   | 7/11 [00:23<00:23,  5.76s/it]
 73%|#######2  | 8/11 [00:39<00:26,  8.98s/it]
 82%|########1 | 9/11 [01:03<00:27, 13.67s/it]
 91%|######### | 10/11 [01:15<00:13, 13.16s/it]
100%|##########| 11/11 [01:38<00:00, 16.12s/it]
100%|##########| 11/11 [01:38<00:00,  8.95s/it]
N 8 16 32 64 100 128 200 256 400 512 1024
fct
numpy 0.001453 0.002724 0.005594 0.011403 0.020167 0.043324 0.145799 0.198417 0.315583 0.159676 0.315009
ort 0.002109 0.003597 0.006917 0.014111 0.024397 0.049899 0.201771 0.301215 0.477231 0.245518 0.487928
torch 0.002607 0.003664 0.046355 0.073776 0.089914 0.070264 0.117514 0.130428 0.157869 0.071218 0.101796


Fifth permutation: (1, 2, 3, 0)#

perm = (1, 2, 3, 0)
df, piv, ax = benchmark_op(perm)
dfs.append(df)
# keyword arguments: positional arguments for DataFrame.pivot
# were removed in pandas 2.0
df.pivot(index="fct", columns="N", values="average")
Transpose benchmark '(3, N, 1, 512)' - (1, 2, 3, 0) - abcd->bcda lower better, Transpose Speedup, baseline=numpy '(3, N, 1, 512)' - (1, 2, 3, 0) - abcd->bcda higher better

Out:

  0%|          | 0/11 [00:00<?, ?it/s]
  9%|9         | 1/11 [00:00<00:01,  9.50it/s]
 18%|#8        | 2/11 [00:00<00:01,  7.05it/s]
 27%|##7       | 3/11 [00:01<00:06,  1.33it/s]
 36%|###6      | 4/11 [00:03<00:08,  1.25s/it]
 45%|####5     | 5/11 [00:06<00:10,  1.82s/it]
 55%|#####4    | 6/11 [00:09<00:10,  2.13s/it]
 64%|######3   | 7/11 [00:12<00:10,  2.57s/it]
 73%|#######2  | 8/11 [00:16<00:08,  2.97s/it]
 82%|########1 | 9/11 [00:21<00:06,  3.48s/it]
 91%|######### | 10/11 [00:23<00:03,  3.05s/it]
100%|##########| 11/11 [00:26<00:00,  3.10s/it]
100%|##########| 11/11 [00:26<00:00,  2.41s/it]
N 8 16 32 64 100 128 200 256 400 512 1024
fct
numpy 0.001224 0.002255 0.004299 0.008579 0.013582 0.017154 0.026333 0.033357 0.051612 0.026411 0.052269
ort 0.000936 0.001252 0.002168 0.004702 0.007467 0.008952 0.011414 0.014455 0.021491 0.010817 0.020759
torch 0.001742 0.002677 0.051657 0.066012 0.089541 0.080423 0.097665 0.098950 0.103380 0.042338 0.046069


Sixth permutation: (1, 2, 4, 3, 0)#

# a five-dimension shape this time, see *shape_fct*
perm = (1, 2, 4, 3, 0)
df, piv, ax = benchmark_op(perm, shape_fct=lambda dim: (3, dim, 1, 8, 512))
dfs.append(df)
# keyword arguments: positional arguments for DataFrame.pivot
# were removed in pandas 2.0
df.pivot(index="fct", columns="N", values="average")
Transpose benchmark '(3, N, 1, 8, 512)' - (1, 2, 4, 3, 0) - abcde->bceda lower better, Transpose Speedup, baseline=numpy '(3, N, 1, 8, 512)' - (1, 2, 4, 3, 0) - abcde->bceda higher better

Out:

  0%|          | 0/11 [00:00<?, ?it/s]
  9%|9         | 1/11 [00:02<00:22,  2.22s/it]
 18%|#8        | 2/11 [00:05<00:27,  3.07s/it]
 27%|##7       | 3/11 [00:10<00:31,  3.98s/it]
 36%|###6      | 4/11 [00:18<00:38,  5.44s/it]
 45%|####5     | 5/11 [00:29<00:43,  7.29s/it]
 55%|#####4    | 6/11 [00:42<00:46,  9.20s/it]
 64%|######3   | 7/11 [01:01<00:49, 12.39s/it]
 73%|#######2  | 8/11 [01:24<00:47, 15.99s/it]
 82%|########1 | 9/11 [02:00<00:44, 22.10s/it]
 91%|######### | 10/11 [02:18<00:20, 20.81s/it]
100%|##########| 11/11 [02:53<00:00, 25.15s/it]
100%|##########| 11/11 [02:53<00:00, 15.75s/it]
N 8 16 32 64 100 128 200 256 400 512 1024
fct
numpy 0.010617 0.021198 0.041283 0.081879 0.127269 0.162384 0.253564 0.324624 0.504955 0.258458 0.518361
ort 0.014161 0.027662 0.051645 0.100948 0.157028 0.200037 0.311217 0.397793 0.619334 0.316762 0.633013
torch 0.062499 0.094994 0.104391 0.113281 0.122391 0.133449 0.161351 0.184524 0.231910 0.109582 0.182550


Conclusion#

All libraries have similar implementations. The onnxruntime measure includes two more copies: one to copy from the numpy container to the onnxruntime container, another one to copy back from the onnxruntime container to numpy. Parallelisation should be investigated.

# Saves every raw measure to CSV and Excel, and the last figure to PNG.
merged = pandas.concat(dfs)
name = "transpose"
merged.to_csv("plot_%s.csv" % name, index=False)
merged.to_excel("plot_%s.xlsx" % name, index=False)
plt.savefig("plot_%s.png" % name)

plt.show()
plot op transpose

Total running time of the script: ( 5 minutes 54.786 seconds)

Gallery generated by Sphinx-Gallery