Logo

A performant, verbose and generic physical unit system.

Static Badge Static Badge

© 2026 Patrick Müller. All rights reserved.

Benchmark

Output of the benchmark described in this tutorial:

init(pint): 2.81 s
init(qunits): 0.28 s
Speedup: 10.19x

arithmetics(pint): 2.81 s
arithmetics(qunits): 2.92 s
Speedup: 0.96x

array_ops(pint): 1.68 s
array_ops(qunits): 0.97 s
Speedup: 1.72x

conversion(pint): 0.86 s
conversion(qunits): 0.84 s
Speedup: 1.02x

qunits dimension cache size: 26
qunits unit cache size: 200

This tutorial shows a benchmark of qunits compared to pint for different operations. The tests are split into four categories: initialization, standard arithmetics, array operations, and unit conversion. First, we import the necessary modules and set up the unit registries.

import os
import time

import numpy as np
from pint import UnitRegistry

from qunits import Quantity, u
from qunits.dimension import _dimension_cache
from qunits.unit import _unit_cache

# Pint builds its registry by parsing text definition files; pointing
# cache_folder at a directory next to this script lets repeated runs reuse
# the parsed result instead of re-parsing on every start.
pintcache = os.path.join(os.path.dirname(__file__), "__pintcache__")
p = UnitRegistry(cache_folder=pintcache)
# NOTE(review): touching these attributes up front presumably warms pint's
# lazy unit construction so it is not charged to the first timed loop — confirm.
p.m, p.mm, p.s

Next, we define the benchmark functions for each category. Each function takes the name of the library being tested, the unit registry, for the array-based tests a quantity constructor, and the number of iterations to perform. The functions measure the time taken to perform a specific operation repeatedly and print the results.

def bench_init(name, ureg, n=100_000) -> float:
    """Benchmark quantity creation (scalar * unit) plus one addition.

    Args:
        name: Label used in the printed report line.
        ureg: Unit registry exposing ``.m`` and ``.mm`` attributes.
        n: Number of timed iterations.

    Returns:
        Elapsed wall-clock time in seconds.
    """
    # Hoist the attribute lookups so only the quantity math is timed.
    metre, millimetre = ureg.m, ureg.mm

    t0 = time.perf_counter()
    for _ in range(n):
        _ = (3 * metre) + (4 * millimetre)  # type: ignore
    dt = time.perf_counter() - t0

    print(f"init({name}): {dt:.2f} seconds")
    return dt


def bench_units(name, ureg, n=1_000_000) -> float:
    """Benchmark bare unit arithmetic: one division and one multiplication
    per iteration.

    Args:
        name: Label used in the printed report line.
        ureg: Unit registry exposing ``.m`` and ``.s`` attributes.
        n: Number of timed iterations.

    Returns:
        Elapsed wall-clock time in seconds.
    """
    # Hoist the attribute lookups so only the unit math is timed.
    metre, second = ureg.m, ureg.s

    t0 = time.perf_counter()
    for _ in range(n):
        _ = metre / second
        _ = metre * second
    dt = time.perf_counter() - t0

    print(f"arithmetics({name}): {dt:.2f} seconds")
    return dt


def bench_array_ops(name, ureg, q, n=1_000) -> float:
    """Benchmark addition of two large array quantities with differing units.

    Args:
        name: Label used in the printed report line.
        ureg: Unit registry exposing ``.m`` and ``.mm`` attributes.
        q: Quantity constructor taking ``(array, unit)``.
        n: Number of timed iterations.

    Returns:
        Elapsed wall-clock time in seconds.
    """
    ones = np.ones(1_000_000)
    # Mismatched units (m vs mm) so each addition also exercises conversion.
    lhs = q(ones, ureg.m)
    rhs = q(ones, ureg.mm)

    t0 = time.perf_counter()
    for _ in range(n):
        _ = lhs + rhs
    dt = time.perf_counter() - t0

    print(f"array_ops({name}): {dt:.2f} seconds")
    return dt


def bench_conversion(name, ureg, q, n=1_000) -> float:
    """Benchmark unit conversion (mm -> m) of a large array quantity.

    Args:
        name: Label used in the printed report line.
        ureg: Unit registry exposing ``.m`` and ``.mm`` attributes.
        q: Quantity constructor taking ``(array, unit)``; instances must
            provide a ``.to(unit)`` method.
        n: Number of timed iterations.

    Returns:
        Elapsed wall-clock time in seconds.
    """
    sample = q(np.ones(1_000_000), ureg.mm)

    t0 = time.perf_counter()
    for _ in range(n):
        _ = sample.to(ureg.m)
    dt = time.perf_counter() - t0

    print(f"conversion({name}): {dt:.2f} seconds")
    return dt

Finally, we run the benchmark tests for each category and print the results.

def _speedup(dt_pint: float, dt_qunits: float) -> None:
    """Print the pint/qunits runtime ratio (>1 means qunits was faster)."""
    print(f"Speedup: {dt_pint / dt_qunits:.2f}x\n")


# Each category runs pint first, then qunits, then reports the ratio.
_speedup(bench_init("pint", p, n=100_000),
         bench_init("qunits", u, n=100_000))
_speedup(bench_units("pint", p, n=1_000_000),
         bench_units("qunits", u, n=1_000_000))
_speedup(bench_array_ops("pint", p, p.Quantity, n=1_000),
         bench_array_ops("qunits", u, Quantity, n=1_000))
_speedup(bench_conversion("pint", p, p.Quantity, n=1_000),
         bench_conversion("qunits", u, Quantity, n=1_000))

# Sizes of qunits' internal caches after the run.
print(f"dimension cache size: {len(_dimension_cache)}")
print(f"unit cache size: {len(_unit_cache)}")