"""Compute the action of the matrix exponential. 

""" 

 

from __future__ import division, print_function, absolute_import 

 

import numpy as np 

 

import scipy.linalg 

import scipy.sparse.linalg 

from scipy.sparse.linalg import LinearOperator, aslinearoperator 

 

__all__ = ['expm_multiply'] 

 

 

def _exact_inf_norm(A):
    # A compatibility function which should eventually disappear.
    if scipy.sparse.isspmatrix(A):
        return max(abs(A).sum(axis=1).flat)
    else:
        return np.linalg.norm(A, np.inf)


def _exact_1_norm(A):
    # A compatibility function which should eventually disappear.
    if scipy.sparse.isspmatrix(A):
        return max(abs(A).sum(axis=0).flat)
    else:
        return np.linalg.norm(A, 1)


def _trace(A):
    # A compatibility function which should eventually disappear.
    if scipy.sparse.isspmatrix(A):
        return A.diagonal().sum()
    else:
        return np.trace(A)


def _ident_like(A):
    # A compatibility function which should eventually disappear.
    if scipy.sparse.isspmatrix(A):
        return scipy.sparse.construct.eye(A.shape[0], A.shape[1],
                dtype=A.dtype, format=A.format)
    else:
        return np.eye(A.shape[0], A.shape[1], dtype=A.dtype)


def expm_multiply(A, B, start=None, stop=None, num=None, endpoint=None):
    """
    Compute the action of the matrix exponential of A on B.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix or vector to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    endpoint : bool, optional
        If True, `stop` is the last time point. Otherwise, it is not included.

    Returns
    -------
    expm_A_B : ndarray
        The result of the action :math:`e^{t_k A} B`.

    Notes
    -----
    The optional arguments defining the sequence of evenly spaced time points
    are compatible with the arguments of `numpy.linspace`.

    The output ndarray shape is somewhat complicated so I explain it here.
    The ndim of the output could be either 1, 2, or 3.
    It would be 1 if you are computing the expm action on a single vector
    at a single time point.
    It would be 2 if you are computing the expm action on a vector
    at multiple time points, or if you are computing the expm action
    on a matrix at a single time point.
    It would be 3 if you want the action on a matrix with multiple
    columns at multiple time points.
    If multiple time points are requested, expm_A_B[0] will always
    be the action of the expm at the first time point,
    regardless of whether the action is on a vector or a matrix.

    References
    ----------
    .. [1] Awad H. Al-Mohy and Nicholas J. Higham (2011)
           "Computing the Action of the Matrix Exponential,
           with an Application to Exponential Integrators."
           SIAM Journal on Scientific Computing,
           33 (2). pp. 488-511. ISSN 1064-8275
           http://eprints.ma.man.ac.uk/1591/

    .. [2] Nicholas J. Higham and Awad H. Al-Mohy (2010)
           "Computing Matrix Functions."
           Acta Numerica,
           19. 159-208. ISSN 0962-4929
           http://eprints.ma.man.ac.uk/1451/

    Examples
    --------
    >>> from scipy.sparse import csc_matrix
    >>> from scipy.sparse.linalg import expm, expm_multiply
    >>> A = csc_matrix([[1, 0], [0, 1]])
    >>> A.todense()
    matrix([[1, 0],
            [0, 1]], dtype=int64)
    >>> B = np.array([np.exp(-1.), np.exp(-2.)])
    >>> B
    array([ 0.36787944,  0.13533528])
    >>> expm_multiply(A, B, start=1, stop=2, num=3, endpoint=True)
    array([[ 1.        ,  0.36787944],
           [ 1.64872127,  0.60653066],
           [ 2.71828183,  1.        ]])
    >>> expm(A).dot(B)         # Verify 1st timestep
    array([ 1.        ,  0.36787944])
    >>> expm(1.5*A).dot(B)     # Verify 2nd timestep
    array([ 1.64872127,  0.60653066])
    >>> expm(2*A).dot(B)       # Verify 3rd timestep
    array([ 2.71828183,  1.        ])
    """
    if all(arg is None for arg in (start, stop, num, endpoint)):
        X = _expm_multiply_simple(A, B)
    else:
        X, status = _expm_multiply_interval(A, B, start, stop, num, endpoint)
    return X


def _expm_multiply_simple(A, B, t=1.0, balance=False):
    """
    Compute the action of the matrix exponential at a single time point.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    t : float
        A time point.
    balance : bool
        Indicates whether or not to apply balancing.

    Returns
    -------
    F : ndarray
        :math:`e^{t A} B`

    Notes
    -----
    This is algorithm (3.2) in Al-Mohy and Higham (2011).

    """
    if balance:
        raise NotImplementedError
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('the matrices A and B have incompatible shapes')
    ident = _ident_like(A)
    n = A.shape[0]
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    u_d = 2**-53
    tol = u_d
    mu = _trace(A) / float(n)
    A = A - mu * ident
    A_1_norm = _exact_1_norm(A)
    if t*A_1_norm == 0:
        m_star, s = 0, 1
    else:
        ell = 2
        norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
    return _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol, balance)


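# Note on the shift used above: _expm_multiply_simple replaces A by the
# shifted operator A - mu*I, where mu = trace(A)/n.  Because mu*I commutes
# with A,
#
#     exp(t*A) = exp(t*mu) * exp(t*(A - mu*I)),
#
# so nothing is lost by the shift, while the 1-norm of the shifted operator
# is typically smaller, which reduces the amount of work selected by
# _fragment_3_1.

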
def _expm_multiply_simple_core(A, B, t, mu, m_star, s, tol=None, balance=False):
    """
    A helper function.
    """
    if balance:
        raise NotImplementedError
    if tol is None:
        u_d = 2 ** -53
        tol = u_d
    F = B
    eta = np.exp(t*mu / float(s))
    for i in range(s):
        c1 = _exact_inf_norm(B)
        for j in range(m_star):
            coeff = t / float(s*(j+1))
            B = coeff * A.dot(B)
            c2 = _exact_inf_norm(B)
            F = F + B
            if c1 + c2 <= tol * _exact_inf_norm(F):
                break
            c1 = c2
        F = eta * F
        B = F
    return F


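# The loops in _expm_multiply_simple_core evaluate, s times in succession,
# the truncated Taylor series
#
#     F ~= sum_{j=0}^{m_star} ((t/s) A)^j B / j!  ~=  exp((t/s) A) B,
#
# stopping early once consecutive terms are negligible relative to F in the
# infinity norm, and multiplying by eta = exp(t*mu/s) at each step to undo
# the trace shift applied by the caller.

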
# This table helps to compute bounds.
# They seem to have been difficult to calculate, involving symbolic
# manipulation of equations, followed by numerical root finding.
_theta = {
        # The first 30 values are from table A.3 of Computing Matrix Functions.
        1: 2.29e-16,
        2: 2.58e-8,
        3: 1.39e-5,
        4: 3.40e-4,
        5: 2.40e-3,
        6: 9.07e-3,
        7: 2.38e-2,
        8: 5.00e-2,
        9: 8.96e-2,
        10: 1.44e-1,
        # 11
        11: 2.14e-1,
        12: 3.00e-1,
        13: 4.00e-1,
        14: 5.14e-1,
        15: 6.41e-1,
        16: 7.81e-1,
        17: 9.31e-1,
        18: 1.09,
        19: 1.26,
        20: 1.44,
        # 21
        21: 1.62,
        22: 1.82,
        23: 2.01,
        24: 2.22,
        25: 2.43,
        26: 2.64,
        27: 2.86,
        28: 3.08,
        29: 3.31,
        30: 3.54,
        # The rest are from table 3.1 of
        # Computing the Action of the Matrix Exponential.
        35: 4.7,
        40: 6.0,
        45: 7.2,
        50: 8.5,
        55: 9.9,
        }


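# How the table is used: for a candidate truncation degree m, _theta[m] is
# roughly the largest value of the (scaled) 1-norm for which a degree-m
# truncated Taylor evaluation meets the accuracy target, so the scaling
# parameter is chosen as
#
#     s = ceil(||t*A||_1 / _theta[m])
#
# (see _fragment_3_1 below, which also trades the degree m off against s).

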
def _onenormest_matrix_power(A, p,
        t=2, itmax=5, compute_v=False, compute_w=False):
    """
    Efficiently estimate the 1-norm of A^p.

    Parameters
    ----------
    A : ndarray
        Matrix whose 1-norm of a power is to be computed.
    p : int
        Non-negative integer power.
    t : int, optional
        A positive parameter controlling the tradeoff between
        accuracy versus time and memory usage.
        Larger values take longer and use more memory
        but give more accurate output.
    itmax : int, optional
        Use at most this many iterations.
    compute_v : bool, optional
        Request a norm-maximizing linear operator input vector if True.
    compute_w : bool, optional
        Request a norm-maximizing linear operator output vector if True.

    Returns
    -------
    est : float
        An underestimate of the 1-norm of the sparse matrix.
    v : ndarray, optional
        The vector such that ||Av||_1 == est*||v||_1.
        It can be thought of as an input to the linear operator
        that gives an output with particularly large norm.
    w : ndarray, optional
        The vector Av which has relatively large 1-norm.
        It can be thought of as an output of the linear operator
        that is relatively large in norm compared to the input.

    """
    #XXX Eventually turn this into an API function in the _onenormest module,
    #XXX and remove its underscore,
    #XXX but wait until expm_multiply goes into scipy.
    return scipy.sparse.linalg.onenormest(aslinearoperator(A) ** p)


class LazyOperatorNormInfo:
    """
    Information about an operator is lazily computed.

    The information includes the exact 1-norm of the operator,
    in addition to estimates of 1-norms of powers of the operator.
    This uses the notation of Computing the Action (2011).
    This class is specialized enough to probably not be of general interest
    outside of this module.

    """
    def __init__(self, A, A_1_norm=None, ell=2, scale=1):
        """
        Provide the operator and some norm-related information.

        Parameters
        ----------
        A : linear operator
            The operator of interest.
        A_1_norm : float, optional
            The exact 1-norm of A.
        ell : int, optional
            A technical parameter controlling norm estimation quality.
        scale : int, optional
            If specified, return the norms of scale*A instead of A.

        """
        self._A = A
        self._A_1_norm = A_1_norm
        self._ell = ell
        self._d = {}
        self._scale = scale

    def set_scale(self, scale):
        """
        Set the scale parameter.
        """
        self._scale = scale

    def onenorm(self):
        """
        Compute the exact 1-norm.
        """
        if self._A_1_norm is None:
            self._A_1_norm = _exact_1_norm(self._A)
        return self._scale*self._A_1_norm

    def d(self, p):
        """
        Lazily estimate d_p(A) ~= || A^p ||^(1/p) where ||.|| is the 1-norm.
        """
        if p not in self._d:
            est = _onenormest_matrix_power(self._A, p, self._ell)
            self._d[p] = est ** (1.0 / p)
        return self._scale*self._d[p]

    def alpha(self, p):
        """
        Lazily compute max(d(p), d(p+1)).
        """
        return max(self.d(p), self.d(p+1))


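# In the notation of Al-Mohy and Higham (2011), LazyOperatorNormInfo.d(p)
# plays the role of d_p = ||A^p||_1^(1/p) (estimated via onenormest rather
# than computed exactly) and alpha(p) plays the role of
# alpha_p = max(d_p, d_{p+1}).  The d_p values are computed on demand and
# cached in self._d, since each one costs a 1-norm estimation.

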
def _compute_cost_div_m(m, p, norm_info):
    """
    A helper function for computing bounds.

    This is equation (3.10).
    It measures cost in terms of the number of required matrix products.

    Parameters
    ----------
    m : int
        A valid key of _theta.
    p : int
        A matrix power.
    norm_info : LazyOperatorNormInfo
        Information about 1-norms of related operators.

    Returns
    -------
    cost_div_m : int
        Required number of matrix products divided by m.

    """
    return int(np.ceil(norm_info.alpha(p) / _theta[m]))


def _compute_p_max(m_max):
    """
    Compute the largest positive integer p such that p*(p-1) <= m_max + 1.

    Do this in a slightly dumb way, but safe and not too slow.

    Parameters
    ----------
    m_max : int
        A count related to bounds.

    """
    sqrt_m_max = np.sqrt(m_max)
    p_low = int(np.floor(sqrt_m_max))
    p_high = int(np.ceil(sqrt_m_max + 1))
    return max(p for p in range(p_low, p_high+1) if p*(p-1) <= m_max + 1)


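# For example, with the default m_max = 55 used elsewhere in this module,
# _compute_p_max(55) == 8, since 8*7 = 56 <= 56 while 9*8 = 72 > 56.

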
def _fragment_3_1(norm_info, n0, tol, m_max=55, ell=2):
    """
    A helper function for the _expm_multiply_* functions.

    Parameters
    ----------
    norm_info : LazyOperatorNormInfo
        Information about norms of certain linear operators of interest.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    tol : float
        Expected to be
        :math:`2^{-24}` for single precision or
        :math:`2^{-53}` for double precision.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.

    Returns
    -------
    best_m : int
        Related to bounds for error control.
    best_s : int
        Amount of scaling.

    Notes
    -----
    This is code fragment (3.1) in Al-Mohy and Higham (2011).
    The discussion of default values for m_max and ell
    is given between the definitions of equation (3.11)
    and the definition of equation (3.12).

    """
    if ell < 1:
        raise ValueError('expected ell to be a positive integer')
    best_m = None
    best_s = None
    if _condition_3_13(norm_info.onenorm(), n0, m_max, ell):
        for m, theta in _theta.items():
            s = int(np.ceil(norm_info.onenorm() / theta))
            if best_m is None or m * s < best_m * best_s:
                best_m = m
                best_s = s
    else:
        # Equation (3.11).
        for p in range(2, _compute_p_max(m_max) + 1):
            for m in range(p*(p-1)-1, m_max+1):
                if m in _theta:
                    s = _compute_cost_div_m(m, p, norm_info)
                    if best_m is None or m * s < best_m * best_s:
                        best_m = m
                        best_s = s
        best_s = max(best_s, 1)
    return best_m, best_s


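# _fragment_3_1 picks the pair (best_m, best_s) minimizing the product
# m * s, which is proportional to the total number of operator applications:
# best_s scaling steps, each evaluating a truncated Taylor series of degree
# at most best_m.  When condition (3.13) holds, best_s is read off directly
# from the _theta table; otherwise the bound of equation (3.11), based on
# the alpha_p estimates, is used instead.

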
def _condition_3_13(A_1_norm, n0, m_max, ell):
    """
    A helper function for the _expm_multiply_* functions.

    Parameters
    ----------
    A_1_norm : float
        The precomputed 1-norm of A.
    n0 : int
        Number of columns in the _expm_multiply_* B matrix.
    m_max : int
        A value related to a bound.
    ell : int
        The number of columns used in the 1-norm approximation.
        This is usually taken to be small, maybe between 1 and 5.

    Returns
    -------
    value : bool
        Indicates whether or not the condition has been met.

    Notes
    -----
    This is condition (3.13) in Al-Mohy and Higham (2011).

    """

    # This is the rhs of equation (3.12).
    p_max = _compute_p_max(m_max)
    a = 2 * ell * p_max * (p_max + 3)

    # Evaluate the condition (3.13).
    b = _theta[m_max] / float(n0 * m_max)
    return A_1_norm <= a * b


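# Written out, condition (3.13) is
#
#     ||A||_1 <= 2 * ell * p_max * (p_max + 3) * _theta[m_max] / (n0 * m_max),
#
# i.e. the (scaled) operator is small enough that choosing m and s directly
# from the _theta table is preferable to spending 1-norm estimates on
# powers of A.

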
def _expm_multiply_interval(A, B, start=None, stop=None,
        num=None, endpoint=None, balance=False, status_only=False):
    """
    Compute the action of the matrix exponential at multiple time points.

    Parameters
    ----------
    A : transposable linear operator
        The operator whose exponential is of interest.
    B : ndarray
        The matrix to be multiplied by the matrix exponential of A.
    start : scalar, optional
        The starting time point of the sequence.
    stop : scalar, optional
        The end time point of the sequence, unless `endpoint` is set to False.
        In that case, the sequence consists of all but the last of ``num + 1``
        evenly spaced time points, so that `stop` is excluded.
        Note that the step size changes when `endpoint` is False.
    num : int, optional
        Number of time points to use.
    endpoint : bool, optional
        If True, `stop` is the last time point. Otherwise, it is not included.
    balance : bool
        Indicates whether or not to apply balancing.
    status_only : bool
        A flag that is set to True for some debugging and testing operations.

    Returns
    -------
    F : ndarray
        :math:`e^{t_k A} B`
    status : int
        An integer status for testing and debugging.

    Notes
    -----
    This is algorithm (5.2) in Al-Mohy and Higham (2011).

    There seems to be a typo, where line 15 of the algorithm should be
    moved to line 6.5 (between lines 6 and 7).

    """
    if balance:
        raise NotImplementedError
    if len(A.shape) != 2 or A.shape[0] != A.shape[1]:
        raise ValueError('expected A to be like a square matrix')
    if A.shape[1] != B.shape[0]:
        raise ValueError('the matrices A and B have incompatible shapes')
    ident = _ident_like(A)
    n = A.shape[0]
    if len(B.shape) == 1:
        n0 = 1
    elif len(B.shape) == 2:
        n0 = B.shape[1]
    else:
        raise ValueError('expected B to be like a matrix or a vector')
    u_d = 2**-53
    tol = u_d
    mu = _trace(A) / float(n)

    # Get the linspace samples, attempting to preserve the linspace defaults.
    linspace_kwargs = {'retstep': True}
    if num is not None:
        linspace_kwargs['num'] = num
    if endpoint is not None:
        linspace_kwargs['endpoint'] = endpoint
    samples, step = np.linspace(start, stop, **linspace_kwargs)

    # Convert the linspace output to the notation used by the publication.
    nsamples = len(samples)
    if nsamples < 2:
        raise ValueError('at least two time points are required')
    q = nsamples - 1
    h = step
    t_0 = samples[0]
    t_q = samples[q]

    # Define the output ndarray.
    # Use an ndim=3 shape, such that the last two indices
    # are the ones that may be involved in level 3 BLAS operations.
    X_shape = (nsamples,) + B.shape
    X = np.empty(X_shape, dtype=np.result_type(A.dtype, B.dtype, float))
    t = t_q - t_0
    A = A - mu * ident
    A_1_norm = _exact_1_norm(A)
    ell = 2
    norm_info = LazyOperatorNormInfo(t*A, A_1_norm=t*A_1_norm, ell=ell)
    if t*A_1_norm == 0:
        m_star, s = 0, 1
    else:
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)

    # Compute the expm action up to the initial time point.
    X[0] = _expm_multiply_simple_core(A, B, t_0, mu, m_star, s)

    # Compute the expm action at the rest of the time points.
    if q <= s:
        if status_only:
            return 0
        else:
            return _expm_multiply_interval_core_0(A, X,
                    h, mu, q, norm_info, tol, ell, n0)
    elif not (q % s):
        if status_only:
            return 1
        else:
            return _expm_multiply_interval_core_1(A, X,
                    h, mu, m_star, s, q, tol)
    elif (q % s):
        if status_only:
            return 2
        else:
            return _expm_multiply_interval_core_2(A, X,
                    h, mu, m_star, s, q, tol)
    else:
        raise Exception('internal error')


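# Dispatch summary for _expm_multiply_interval: with q = nsamples - 1 steps
# of size h and the scaling parameter s chosen for the whole interval,
#
#     q <= s        -> core_0: recompute (m_star, s) for steps of size t/q
#                      and advance one time point at a time;
#     q % s == 0    -> core_1: within each of the s blocks of d = q // s
#                      steps, reuse the cached terms K[p] = (h*A)^p Z / p!;
#     q % s != 0    -> core_2: the same idea, with a shorter final block.

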
def _expm_multiply_interval_core_0(A, X, h, mu, q, norm_info, tol, ell, n0):
    """
    A helper function, for the case q <= s.
    """

    # Compute the new values of m_star and s which should be applied
    # over intervals of size t/q
    if norm_info.onenorm() == 0:
        m_star, s = 0, 1
    else:
        norm_info.set_scale(1./q)
        m_star, s = _fragment_3_1(norm_info, n0, tol, ell=ell)
        norm_info.set_scale(1)

    for k in range(q):
        X[k+1] = _expm_multiply_simple_core(A, X[k], h, mu, m_star, s)
    return X, 0


def _expm_multiply_interval_core_1(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s == 0.
    """
    d = q // s
    input_shape = X.shape[1:]
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(s):
        Z = X[i*d]
        K[0] = Z
        high_p = 0
        for k in range(1, d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                if p > high_p:
                    # K[p] = (h*A)^p Z / p! does not depend on k,
                    # so compute it once per block and reuse it.
                    K[p] = h * A.dot(K[p-1]) / float(p)
                    high_p = p
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 1


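# In _expm_multiply_interval_core_1 (and core_2 below), F accumulates the
# truncated series
#
#     F ~= sum_{p=0}^{m_star} (k*h)^p A^p Z / p!  ~=  exp(k*h*A) Z,
#
# from the cached terms K[p] = (h*A)^p Z / p!, and the factor exp(k*h*mu)
# restores the trace shift for the time point advanced k*h beyond Z.

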
def _expm_multiply_interval_core_2(A, X, h, mu, m_star, s, q, tol):
    """
    A helper function, for the case q > s and q % s > 0.
    """
    d = q // s
    j = q // d
    r = q - d * j
    input_shape = X.shape[1:]
    K_shape = (m_star + 1, ) + input_shape
    K = np.empty(K_shape, dtype=X.dtype)
    for i in range(j + 1):
        Z = X[i*d]
        K[0] = Z
        high_p = 0
        if i < j:
            effective_d = d
        else:
            effective_d = r
        for k in range(1, effective_d+1):
            F = K[0]
            c1 = _exact_inf_norm(F)
            for p in range(1, m_star+1):
                if p == high_p + 1:
                    K[p] = h * A.dot(K[p-1]) / float(p)
                    high_p = p
                coeff = float(pow(k, p))
                F = F + coeff * K[p]
                inf_norm_K_p_1 = _exact_inf_norm(K[p])
                c2 = coeff * inf_norm_K_p_1
                if c1 + c2 <= tol * _exact_inf_norm(F):
                    break
                c1 = c2
            X[k + i*d] = np.exp(k*h*mu) * F
    return X, 2
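

if __name__ == '__main__':
    # Illustrative self-check (a minimal sketch, not part of the public
    # interface): compare expm_multiply against a dense computation that
    # goes through scipy.linalg.expm, for a small random sparse matrix.
    np.random.seed(1234)
    n = 20
    A_dense = np.random.randn(n, n)
    A_sparse = scipy.sparse.csr_matrix(A_dense)
    b = np.random.randn(n)
    observed = expm_multiply(A_sparse, b)
    expected = scipy.linalg.expm(A_dense).dot(b)
    print('max abs difference:', np.max(np.abs(observed - expected)))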