5.1. Matrix Multiplication Operator
import numpy as np
import jax.numpy as jnp
import pylops
from cr.sparse import lop
import cr.sparse as crs
m,n = 10000,10000
A_np = np.random.normal(0, 1, (m, n))
x_np = np.ones(n)
op_np = pylops.MatrixMult(A_np)
y_np = op_np * x_np
A_jax = jnp.array(A_np)
x_jax = jnp.array(x_np)
op_jax = lop.matrix(A_jax)
op_jax = lop.jit(op_jax)
y_jax = op_jax.times(x_jax)
np.allclose(y_np, y_jax, atol=1e-4)
True
np_time = %timeit -o op_np * x_np
24.5 ms ± 251 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
jax_time = %timeit -o op_jax.times(x_jax).block_until_ready()
1.04 ms ± 428 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each)
gain = np_time.average / jax_time.average
print(gain)
23.628277638522192
y1_np = op_np.H * x_np
y1_jax = op_jax.trans(x_jax).block_until_ready()
np.allclose(y1_np, y1_jax, atol=1e-4)
True
np_time = %timeit -o op_np.H * x_np
25.1 ms ± 210 µs per loop (mean ± std. dev. of 7 runs, 10 loops each)
jax_time = %timeit -o op_jax.trans(x_jax).block_until_ready()
1.04 ms ± 567 ns per loop (mean ± std. dev. of 7 runs, 1000 loops each)
gain = np_time.average / jax_time.average
print(gain)
24.128274915397053