"""Routines for numerical differentiation.""" 

 

from __future__ import division 

 

import numpy as np 

from numpy.linalg import norm 

 

from scipy.sparse.linalg import LinearOperator 

from ..sparse import issparse, csc_matrix, csr_matrix, coo_matrix, find 

from ._group_columns import group_dense, group_sparse 

 

EPS = np.finfo(np.float64).eps 

 

 

def _adjust_scheme_to_bounds(x0, h, num_steps, scheme, lb, ub):
    """Adjust final difference scheme to the presence of bounds.

    Parameters
    ----------
    x0 : ndarray, shape (n,)
        Point at which we wish to estimate the derivative.
    h : ndarray, shape (n,)
        Desired finite difference steps.
    num_steps : int
        Number of `h` steps in one direction required to implement finite
        difference scheme. For example, 2 means that we need to evaluate
        f(x0 + 2 * h) or f(x0 - 2 * h).
    scheme : {'1-sided', '2-sided'}
        Whether steps in one or both directions are required. In other
        words, '1-sided' applies to forward and backward schemes, '2-sided'
        applies to central schemes.
    lb : ndarray, shape (n,)
        Lower bounds on independent variables.
    ub : ndarray, shape (n,)
        Upper bounds on independent variables.

    Returns
    -------
    h_adjusted : ndarray, shape (n,)
        Adjusted step sizes. A step size decreases only if a sign flip or
        switching to a one-sided scheme doesn't allow taking a full step.
    use_one_sided : ndarray of bool, shape (n,)
        Whether to switch to a one-sided scheme. Informative only for
        ``scheme='2-sided'``.
    """
    if scheme == '1-sided':
        use_one_sided = np.ones_like(h, dtype=bool)
    elif scheme == '2-sided':
        h = np.abs(h)
        use_one_sided = np.zeros_like(h, dtype=bool)
    else:
        raise ValueError("`scheme` must be '1-sided' or '2-sided'.")

    if np.all((lb == -np.inf) & (ub == np.inf)):
        return h, use_one_sided

    h_total = h * num_steps
    h_adjusted = h.copy()

    lower_dist = x0 - lb
    upper_dist = ub - x0

    if scheme == '1-sided':
        x = x0 + h_total
        violated = (x < lb) | (x > ub)
        fitting = np.abs(h_total) <= np.maximum(lower_dist, upper_dist)
        h_adjusted[violated & fitting] *= -1

        forward = (upper_dist >= lower_dist) & ~fitting
        h_adjusted[forward] = upper_dist[forward] / num_steps
        backward = (upper_dist < lower_dist) & ~fitting
        h_adjusted[backward] = -lower_dist[backward] / num_steps
    elif scheme == '2-sided':
        central = (lower_dist >= h_total) & (upper_dist >= h_total)

        forward = (upper_dist >= lower_dist) & ~central
        h_adjusted[forward] = np.minimum(
            h[forward], 0.5 * upper_dist[forward] / num_steps)
        use_one_sided[forward] = True

        backward = (upper_dist < lower_dist) & ~central
        h_adjusted[backward] = -np.minimum(
            h[backward], 0.5 * lower_dist[backward] / num_steps)
        use_one_sided[backward] = True

        min_dist = np.minimum(upper_dist, lower_dist) / num_steps
        adjusted_central = (~central & (np.abs(h_adjusted) <= min_dist))
        h_adjusted[adjusted_central] = min_dist[adjusted_central]
        use_one_sided[adjusted_central] = False

    return h_adjusted, use_one_sided
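
# A minimal illustration of the 2-sided adjustment (values chosen for this
# sketch, not taken from any caller): with ``x0`` close to the upper bound
# there is no room for a full central step, so the step flips to a one-sided
# backward difference of full size.
#
#   >>> x0, h = np.array([0.9]), np.array([0.2])
#   >>> lb, ub = np.array([-1.0]), np.array([1.0])
#   >>> _adjust_scheme_to_bounds(x0, h, 1, '2-sided', lb, ub)
#   # -> (array([-0.2]), array([ True])): full backward step, one-sided.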

 

 

relative_step = {"2-point": EPS**0.5,
                 "3-point": EPS**(1/3),
                 "cs": EPS**0.5}

 

 

def _compute_absolute_step(rel_step, x0, method):
    if rel_step is None:
        rel_step = relative_step[method]
    sign_x0 = (x0 >= 0).astype(float) * 2 - 1
    return rel_step * sign_x0 * np.maximum(1.0, np.abs(x0))
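
# Sketch of the step scaling (assumed inputs): the absolute step is relative
# to ``max(1, |x0|)`` and carries the sign of ``x0``, so forward steps point
# away from zero and large components get proportionally large steps.
#
#   >>> _compute_absolute_step(None, np.array([1e4, -1e-3]), '2-point')
#   # -> approximately array([1.49e-04, -1.49e-08]), i.e. EPS**0.5 scaled.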

 

 

def _prepare_bounds(bounds, x0):
    lb, ub = [np.asarray(b, dtype=float) for b in bounds]
    if lb.ndim == 0:
        lb = np.resize(lb, x0.shape)

    if ub.ndim == 0:
        ub = np.resize(ub, x0.shape)

    return lb, ub
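
# Scalar bounds broadcast to the shape of ``x0``; a small sketch with
# assumed values:
#
#   >>> _prepare_bounds((0, np.inf), np.zeros(3))
#   # -> (array([0., 0., 0.]), array([inf, inf, inf]))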

 

 

def group_columns(A, order=0):
    """Group columns of a 2-d matrix for sparse finite differencing [1]_.

    Two columns are in the same group if in each row at least one of them
    is zero. A greedy sequential algorithm is used to construct groups.

    Parameters
    ----------
    A : array_like or sparse matrix, shape (m, n)
        Matrix of which to group columns.
    order : int, iterable of int with shape (n,) or None
        Permutation array which defines the order in which columns are
        enumerated. If int or None, a random permutation is used, with
        `order` used as a random seed. Default is 0, that is, use a random
        permutation but guarantee repeatability.

    Returns
    -------
    groups : ndarray of int, shape (n,)
        Contains values from 0 to n_groups-1, where n_groups is the number
        of found groups. Each value ``groups[i]`` is the index of the group
        to which the i-th column is assigned. The procedure is helpful only
        if n_groups is significantly less than n.

    References
    ----------
    .. [1] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13 (1974), pp. 117-120.
    """
    if issparse(A):
        A = csc_matrix(A)
    else:
        A = np.atleast_2d(A)
        A = (A != 0).astype(np.int32)

    if A.ndim != 2:
        raise ValueError("`A` must be 2-dimensional.")

    m, n = A.shape

    if order is None or np.isscalar(order):
        rng = np.random.RandomState(order)
        order = rng.permutation(n)
    else:
        order = np.asarray(order)
        if order.shape != (n,):
            raise ValueError("`order` has incorrect shape.")

    A = A[:, order]

    if issparse(A):
        groups = group_sparse(m, n, A.indices, A.indptr)
    else:
        groups = group_dense(m, n, A)

    groups[order] = groups.copy()

    return groups
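
# For example (an illustrative sketch): in the bidiagonal structure below,
# columns 0 and 2 never share a row and neither do columns 1 and 3, so as
# few as two groups suffice and two perturbed evaluations can recover all
# four columns.
#
#   >>> A = np.array([[1, 1, 0, 0],
#   ...               [0, 1, 1, 0],
#   ...               [0, 0, 1, 1]])
#   >>> group_columns(A)
#   # -> e.g. array([0, 1, 0, 1]); the exact grouping depends on `order`.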

 

 

def approx_derivative(fun, x0, method='3-point', rel_step=None, f0=None,
                      bounds=(-np.inf, np.inf), sparsity=None,
                      as_linear_operator=False, args=(), kwargs={}):
    """Compute finite difference approximation of the derivatives of a
    vector-valued function.

    If a function maps from R^n to R^m, its derivatives form an m-by-n matrix
    called the Jacobian, where an element (i, j) is a partial derivative of
    f[i] with respect to x[j].

    Parameters
    ----------
    fun : callable
        Function of which to estimate the derivatives. The argument x
        passed to this function is an ndarray of shape (n,) (never a scalar
        even if n=1). It must return a 1-d array_like of shape (m,) or a
        scalar.
    x0 : array_like of shape (n,) or float
        Point at which to estimate the derivatives. A float will be
        converted to a 1-d array.
    method : {'3-point', '2-point', 'cs'}, optional
        Finite difference method to use:
            - '2-point' - use the first order accuracy forward or backward
                          difference.
            - '3-point' - use central difference in interior points and the
                          second order accuracy forward or backward difference
                          near the boundary.
            - 'cs' - use a complex-step finite difference scheme. This assumes
                     that the user function is real-valued and can be
                     analytically continued to the complex plane. Otherwise,
                     produces bogus results.
    rel_step : None or array_like, optional
        Relative step size to use. The absolute step size is computed as
        ``h = rel_step * sign(x0) * max(1, abs(x0))``, possibly adjusted to
        fit into the bounds. For ``method='3-point'`` the sign of `h` is
        ignored. If None (default) then the step is selected automatically,
        see Notes.
    f0 : None or array_like, optional
        If not None it is assumed to be equal to ``fun(x0)``, in which case
        ``fun(x0)`` is not called. Default is None.
    bounds : tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no
        bounds. Each bound must match the size of `x0` or be a scalar, in
        the latter case the bound will be the same for all variables. Use
        it to limit the range of function evaluation. Bounds checking is
        not implemented when `as_linear_operator` is True.
    sparsity : {None, array_like, sparse matrix, 2-tuple}, optional
        Defines a sparsity structure of the Jacobian matrix. If the Jacobian
        matrix is known to have only a few non-zero elements in each row,
        then it's possible to estimate several of its columns by a single
        function evaluation [2]_. To perform such economical computations
        two ingredients are required:

        * structure : array_like or sparse matrix of shape (m, n). A zero
          element means that the corresponding element of the Jacobian is
          identically zero.
        * groups : array_like of shape (n,). A column grouping for a given
          sparsity structure, use `group_columns` to obtain it.

        A single array or a sparse matrix is interpreted as a sparsity
        structure, and groups are computed inside the function. A tuple is
        interpreted as (structure, groups). If None (default), a standard
        dense differencing will be used.

        Note that sparse differencing makes sense only for large Jacobian
        matrices where each row contains few non-zero elements.
    as_linear_operator : bool, optional
        When True the function returns a
        `scipy.sparse.linalg.LinearOperator`. Otherwise it returns a dense
        array or a sparse matrix depending on `sparsity`. The linear
        operator provides an efficient way of computing ``J.dot(p)`` for any
        vector ``p`` of shape (n,), but does not allow direct access to
        individual elements of the matrix. By default `as_linear_operator`
        is False.
    args, kwargs : tuple and dict, optional
        Additional arguments passed to `fun`. Both empty by default.
        The calling signature is ``fun(x, *args, **kwargs)``.

    Returns
    -------
    J : {ndarray, sparse matrix, LinearOperator}
        Finite difference approximation of the Jacobian matrix.
        If `as_linear_operator` is True, a LinearOperator with shape (m, n)
        is returned. Otherwise a dense array or a sparse matrix is returned,
        depending on how `sparsity` is defined. If `sparsity` is None then
        an ndarray with shape (m, n) is returned. If `sparsity` is not None
        a csr_matrix with shape (m, n) is returned. Sparse matrices and
        linear operators are always returned as 2-dimensional structures;
        for ndarrays, if m=1 the result is returned as a 1-dimensional
        gradient array with shape (n,).

    See Also
    --------
    check_derivative : Check correctness of a function computing derivatives.

    Notes
    -----
    If `rel_step` is not provided, it is assigned to ``EPS**(1/s)``, where
    EPS is machine epsilon for float64 numbers, s=2 for the '2-point' method
    and s=3 for the '3-point' method. Such a relative step approximately
    minimizes the sum of truncation and round-off errors, see [1]_.

    A finite difference scheme for the '3-point' method is selected
    automatically. The well-known central difference scheme is used for
    points sufficiently far from the boundary, and a 3-point forward or
    backward scheme is used for points near the boundary. Both schemes have
    second-order accuracy in terms of the Taylor expansion. Refer to [3]_
    for the formulas of 3-point forward and backward difference schemes.

    For dense differencing, when m=1 the Jacobian is returned with shape
    (n,); on the other hand, when n=1 the Jacobian is returned with shape
    (m, 1). Our motivation is the following: a) it handles the case of
    gradient computation (m=1) in a conventional way; b) it clearly
    separates these two different cases; c) in all cases np.atleast_2d can
    be called to get a 2-d Jacobian with correct dimensions.

    References
    ----------
    .. [1] W. H. Press et. al. "Numerical Recipes. The Art of Scientific
           Computing. 3rd edition", sec. 5.7.

    .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13 (1974), pp. 117-120.

    .. [3] B. Fornberg, "Generation of Finite Difference Formulas on
           Arbitrarily Spaced Grids", Mathematics of Computation 51, 1988.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import approx_derivative
    >>>
    >>> def f(x, c1, c2):
    ...     return np.array([x[0] * np.sin(c1 * x[1]),
    ...                      x[0] * np.cos(c2 * x[1])])
    ...
    >>> x0 = np.array([1.0, 0.5 * np.pi])
    >>> approx_derivative(f, x0, args=(1, 2))
    array([[ 1.,  0.],
           [-1.,  0.]])

    Bounds can be used to limit the region of function evaluation.
    In the example below we compute left and right derivatives at point 1.0.

    >>> def g(x):
    ...     return x**2 if x >= 1 else x
    ...
    >>> x0 = 1.0
    >>> approx_derivative(g, x0, bounds=(-np.inf, 1.0))
    array([ 1.])
    >>> approx_derivative(g, x0, bounds=(1.0, np.inf))
    array([ 2.])
    """
    if method not in ['2-point', '3-point', 'cs']:
        raise ValueError("Unknown method '%s'." % method)

    x0 = np.atleast_1d(x0)
    if x0.ndim > 1:
        raise ValueError("`x0` must have at most 1 dimension.")

    lb, ub = _prepare_bounds(bounds, x0)

    if lb.shape != x0.shape or ub.shape != x0.shape:
        raise ValueError("Inconsistent shapes between bounds and `x0`.")

    if as_linear_operator and not (np.all(np.isinf(lb))
                                   and np.all(np.isinf(ub))):
        raise ValueError("Bounds not supported when "
                         "`as_linear_operator` is True.")

    def fun_wrapped(x):
        f = np.atleast_1d(fun(x, *args, **kwargs))
        if f.ndim > 1:
            raise RuntimeError("`fun` return value has "
                               "more than 1 dimension.")
        return f

    if f0 is None:
        f0 = fun_wrapped(x0)
    else:
        f0 = np.atleast_1d(f0)
        if f0.ndim > 1:
            raise ValueError("`f0` passed has more than 1 dimension.")

    if np.any((x0 < lb) | (x0 > ub)):
        raise ValueError("`x0` violates bound constraints.")

    if as_linear_operator:
        if rel_step is None:
            rel_step = relative_step[method]

        return _linear_operator_difference(fun_wrapped, x0,
                                           f0, rel_step, method)
    else:
        h = _compute_absolute_step(rel_step, x0, method)

        if method == '2-point':
            h, use_one_sided = _adjust_scheme_to_bounds(
                x0, h, 1, '1-sided', lb, ub)
        elif method == '3-point':
            h, use_one_sided = _adjust_scheme_to_bounds(
                x0, h, 1, '2-sided', lb, ub)
        elif method == 'cs':
            use_one_sided = False

        if sparsity is None:
            return _dense_difference(fun_wrapped, x0, f0, h,
                                     use_one_sided, method)
        else:
            if not issparse(sparsity) and len(sparsity) == 2:
                structure, groups = sparsity
            else:
                structure = sparsity
                groups = group_columns(sparsity)

            if issparse(structure):
                structure = csc_matrix(structure)
            else:
                structure = np.atleast_2d(structure)

            groups = np.atleast_1d(groups)
            return _sparse_difference(fun_wrapped, x0, f0, h,
                                      use_one_sided, structure,
                                      groups, method)
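
# A sketch of sparse differencing with `approx_derivative` (assumed toy
# problem): for ``f(x) = x**2`` the Jacobian is diagonal, so all columns fit
# in one group and the whole matrix is recovered with a fixed number of
# evaluations, independent of n.
#
#   >>> x0 = np.arange(1.0, 5.0)
#   >>> structure = np.eye(4)
#   >>> groups = group_columns(structure)
#   >>> J = approx_derivative(lambda x: x**2, x0,
#   ...                       sparsity=(structure, groups))
#   >>> J.toarray()
#   # -> approximately diag([2., 4., 6., 8.]), returned as a csr_matrix.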

 

 

def _linear_operator_difference(fun, x0, f0, h, method):
    m = f0.size
    n = x0.size

    if method == '2-point':
        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = h / norm(p)
            x = x0 + dx*p
            df = fun(x) - f0
            return df / dx

    elif method == '3-point':
        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = 2*h / norm(p)
            x1 = x0 - (dx/2)*p
            x2 = x0 + (dx/2)*p
            f1 = fun(x1)
            f2 = fun(x2)
            df = f2 - f1
            return df / dx

    elif method == 'cs':
        def matvec(p):
            if np.array_equal(p, np.zeros_like(p)):
                return np.zeros(m)
            dx = h / norm(p)
            x = x0 + dx*p*1.j
            f1 = fun(x)
            df = f1.imag
            return df / dx

    else:
        raise RuntimeError("Should never be here.")

    return LinearOperator((m, n), matvec)
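
# Usage sketch (assumed inputs): the operator exposes only matrix-vector
# products, each costing one or two extra function evaluations.
#
#   >>> J = approx_derivative(lambda x: np.array([x[0] * x[1]]),
#   ...                       np.array([3.0, 2.0]), as_linear_operator=True)
#   >>> J.dot(np.array([1.0, 0.0]))
#   # -> approximately array([2.]), the first column of the Jacobian.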

 

 

def _dense_difference(fun, x0, f0, h, use_one_sided, method):
    m = f0.size
    n = x0.size
    J_transposed = np.empty((n, m))
    h_vecs = np.diag(h)

    for i in range(h.size):
        if method == '2-point':
            x = x0 + h_vecs[i]
            dx = x[i] - x0[i]  # Recompute dx as an exactly representable number.
            df = fun(x) - f0
        elif method == '3-point' and use_one_sided[i]:
            x1 = x0 + h_vecs[i]
            x2 = x0 + 2 * h_vecs[i]
            dx = x2[i] - x0[i]
            f1 = fun(x1)
            f2 = fun(x2)
            df = -3.0 * f0 + 4 * f1 - f2
        elif method == '3-point' and not use_one_sided[i]:
            x1 = x0 - h_vecs[i]
            x2 = x0 + h_vecs[i]
            dx = x2[i] - x1[i]
            f1 = fun(x1)
            f2 = fun(x2)
            df = f2 - f1
        elif method == 'cs':
            f1 = fun(x0 + h_vecs[i]*1.j)
            df = f1.imag
            dx = h_vecs[i, i]
        else:
            raise RuntimeError("Should never be here.")

        J_transposed[i] = df / dx

    if m == 1:
        J_transposed = np.ravel(J_transposed)

    return J_transposed.T
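
# The one-sided '3-point' branch above implements the second-order stencil
# (-3*f(t) + 4*f(t + s) - f(t + 2*s)) / (2*s), which is exact for
# quadratics. A quick standalone check with assumed values:
#
#   >>> fq = lambda t: 3*t**2 + 2*t + 1
#   >>> t, s = 1.0, 0.1
#   >>> (-3*fq(t) + 4*fq(t + s) - fq(t + 2*s)) / (2*s)
#   # -> 8.0 up to rounding; the exact derivative is 6*t + 2 = 8.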

 

 

def _sparse_difference(fun, x0, f0, h, use_one_sided,
                       structure, groups, method):
    m = f0.size
    n = x0.size
    row_indices = []
    col_indices = []
    fractions = []

    n_groups = np.max(groups) + 1
    for group in range(n_groups):
        # Perturb variables which are in the same group simultaneously.
        e = np.equal(group, groups)
        h_vec = h * e
        if method == '2-point':
            x = x0 + h_vec
            dx = x - x0
            df = fun(x) - f0
            # The result is written to columns which correspond to perturbed
            # variables.
            cols, = np.nonzero(e)
            # Find all non-zero elements in selected columns of Jacobian.
            i, j, _ = find(structure[:, cols])
            # Restore column indices in the full array.
            j = cols[j]
        elif method == '3-point':
            # Here we do conceptually the same but separate one-sided
            # and two-sided schemes.
            x1 = x0.copy()
            x2 = x0.copy()

            mask_1 = use_one_sided & e
            x1[mask_1] += h_vec[mask_1]
            x2[mask_1] += 2 * h_vec[mask_1]

            mask_2 = ~use_one_sided & e
            x1[mask_2] -= h_vec[mask_2]
            x2[mask_2] += h_vec[mask_2]

            dx = np.zeros(n)
            dx[mask_1] = x2[mask_1] - x0[mask_1]
            dx[mask_2] = x2[mask_2] - x1[mask_2]

            f1 = fun(x1)
            f2 = fun(x2)

            cols, = np.nonzero(e)
            i, j, _ = find(structure[:, cols])
            j = cols[j]

            mask = use_one_sided[j]
            df = np.empty(m)

            rows = i[mask]
            df[rows] = -3 * f0[rows] + 4 * f1[rows] - f2[rows]

            rows = i[~mask]
            df[rows] = f2[rows] - f1[rows]
        elif method == 'cs':
            f1 = fun(x0 + h_vec*1.j)
            df = f1.imag
            dx = h_vec
            cols, = np.nonzero(e)
            i, j, _ = find(structure[:, cols])
            j = cols[j]
        else:
            raise RuntimeError("Should never be here.")

        # All that's left is to compute the fraction. We store i, j and
        # fractions as separate arrays and later construct a coo_matrix.
        row_indices.append(i)
        col_indices.append(j)
        fractions.append(df[i] / dx[j])

    row_indices = np.hstack(row_indices)
    col_indices = np.hstack(col_indices)
    fractions = np.hstack(fractions)
    J = coo_matrix((fractions, (row_indices, col_indices)), shape=(m, n))
    return csr_matrix(J)
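
# How the (row, column, fraction) triplets assemble into the Jacobian; a
# small standalone sketch with assumed values:
#
#   >>> rows = np.array([0, 1, 1])
#   >>> cols = np.array([0, 0, 1])
#   >>> vals = np.array([2.0, 3.0, 4.0])
#   >>> coo_matrix((vals, (rows, cols)), shape=(2, 2)).toarray()
#   array([[ 2.,  0.],
#          [ 3.,  4.]])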

 

 

def check_derivative(fun, jac, x0, bounds=(-np.inf, np.inf), args=(),
                     kwargs={}):
    """Check correctness of a function computing derivatives (Jacobian or
    gradient) by comparison with a finite difference approximation.

    Parameters
    ----------
    fun : callable
        Function of which to estimate the derivatives. The argument x
        passed to this function is an ndarray of shape (n,) (never a scalar
        even if n=1). It must return a 1-d array_like of shape (m,) or a
        scalar.
    jac : callable
        Function which computes the Jacobian matrix of `fun`. It must work
        with argument x the same way as `fun`. The return value must be
        array_like or sparse matrix with an appropriate shape.
    x0 : array_like of shape (n,) or float
        Point at which to estimate the derivatives. A float will be
        converted to a 1-d array.
    bounds : 2-tuple of array_like, optional
        Lower and upper bounds on independent variables. Defaults to no
        bounds. Each bound must match the size of `x0` or be a scalar, in
        the latter case the bound will be the same for all variables. Use
        it to limit the range of function evaluation.
    args, kwargs : tuple and dict, optional
        Additional arguments passed to `fun` and `jac`. Both empty by
        default. The calling signature is ``fun(x, *args, **kwargs)`` and
        the same for `jac`.

    Returns
    -------
    accuracy : float
        The maximum among all relative errors for elements with absolute
        values higher than 1 and absolute errors for elements with absolute
        values less than or equal to 1. If `accuracy` is on the order of
        1e-6 or lower, then it is likely that your `jac` implementation is
        correct.

    See Also
    --------
    approx_derivative : Compute finite difference approximation of derivative.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.optimize import check_derivative
    >>>
    >>> def f(x, c1, c2):
    ...     return np.array([x[0] * np.sin(c1 * x[1]),
    ...                      x[0] * np.cos(c2 * x[1])])
    ...
    >>> def jac(x, c1, c2):
    ...     return np.array([
    ...         [np.sin(c1 * x[1]), c1 * x[0] * np.cos(c1 * x[1])],
    ...         [np.cos(c2 * x[1]), -c2 * x[0] * np.sin(c2 * x[1])]
    ...     ])
    ...
    >>> x0 = np.array([1.0, 0.5 * np.pi])
    >>> check_derivative(f, jac, x0, args=(1, 2))
    2.4492935982947064e-16
    """
    J_to_test = jac(x0, *args, **kwargs)
    if issparse(J_to_test):
        J_diff = approx_derivative(fun, x0, bounds=bounds, sparsity=J_to_test,
                                   args=args, kwargs=kwargs)
        J_to_test = csr_matrix(J_to_test)
        abs_err = J_to_test - J_diff
        i, j, abs_err_data = find(abs_err)
        J_diff_data = np.asarray(J_diff[i, j]).ravel()
        return np.max(np.abs(abs_err_data) /
                      np.maximum(1, np.abs(J_diff_data)))
    else:
        J_diff = approx_derivative(fun, x0, bounds=bounds,
                                   args=args, kwargs=kwargs)
        abs_err = np.abs(J_to_test - J_diff)
        return np.max(abs_err / np.maximum(1, np.abs(J_diff)))