""" 

Pure SciPy implementation of Locally Optimal Block Preconditioned Conjugate 

Gradient Method (LOBPCG), see 

https://bitbucket.org/joseroman/blopex 

 

License: BSD 

 

Authors: Robert Cimrman, Andrew Knyazev 

 

Examples in tests directory contributed by Nils Wagner. 

""" 

 

from __future__ import division, print_function, absolute_import

import numpy as np
from numpy.testing import assert_allclose
from scipy._lib.six import xrange
from scipy.linalg import inv, eigh, cho_factor, cho_solve, cholesky
from scipy.sparse.linalg import aslinearoperator, LinearOperator

__all__ = ['lobpcg']


def pause():
    # Used only when verbosity level > 10.
    input()


def save(ar, fileName):
    # Used only when verbosity level > 10.
    # np.savetxt has no `precision` argument; precision is set via `fmt`.
    np.savetxt(fileName, ar, fmt='%.8e')


def _assert_symmetric(M, rtol=1e-5, atol=1e-8):
    assert_allclose(M.T.conj(), M, rtol=rtol, atol=atol)


##
# 21.05.2007, c
def as2d(ar):
    """
    If the input array is 2D, return it; if it is 1D, append a dimension,
    making it a column vector.
    """
    if ar.ndim == 2:
        return ar
    else:  # Assume the input is 1D.
        aux = np.array(ar, copy=False)
        aux.shape = (ar.shape[0], 1)
        return aux


def _makeOperator(operatorInput, expectedShape):
    """Takes a dense numpy array, a sparse matrix, or
    a function and makes an operator performing matrix * blockvector
    products.

    Examples
    --------
    >>> A = _makeOperator(arrayA, (n, n))
    >>> vectorB = A(vectorX)

    """
    if operatorInput is None:
        # A None input stands for the identity operator.
        def ident(x):
            return x
        operator = LinearOperator(expectedShape, ident, matmat=ident)
    else:
        operator = aslinearoperator(operatorInput)

    if operator.shape != expectedShape:
        raise ValueError('operator has invalid shape')

    return operator


def _applyConstraints(blockVectorV, factYBY, blockVectorBY, blockVectorY):
    """Changes blockVectorV in place."""
    gramYBV = np.dot(blockVectorBY.T.conj(), blockVectorV)
    tmp = cho_solve(factYBY, gramYBV)
    blockVectorV -= np.dot(blockVectorY, tmp)


def _b_orthonormalize(B, blockVectorV, blockVectorBV=None, retInvR=False): 
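    # B-orthonormalize the columns of blockVectorV: with the Cholesky
    # factorization V^H B V = R^H R, replace V by V R^{-1} so that the new
    # block satisfies V^H B V = I.  B V is updated consistently, and R^{-1}
    # is optionally returned for updating related blocks (e.g. A V).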
    if blockVectorBV is None:
        if B is not None:
            blockVectorBV = B(blockVectorV)
        else:
            blockVectorBV = blockVectorV  # Shared data!!!
    gramVBV = np.dot(blockVectorV.T.conj(), blockVectorBV)
    gramVBV = cholesky(gramVBV)
    gramVBV = inv(gramVBV, overwrite_a=True)
    # gramVBV is now R^{-1}.
    blockVectorV = np.dot(blockVectorV, gramVBV)
    if B is not None:
        blockVectorBV = np.dot(blockVectorBV, gramVBV)

    if retInvR:
        return blockVectorV, blockVectorBV, gramVBV
    else:
        return blockVectorV, blockVectorBV


def lobpcg(A, X,
           B=None, M=None, Y=None,
           tol=None, maxiter=20,
           largest=True, verbosityLevel=0,
           retLambdaHistory=False, retResidualNormsHistory=False):
"""Locally Optimal Block Preconditioned Conjugate Gradient Method (LOBPCG) 

 

LOBPCG is a preconditioned eigensolver for large symmetric positive 

definite (SPD) generalized eigenproblems. 

 

Parameters 

---------- 

A : {sparse matrix, dense matrix, LinearOperator} 

The symmetric linear operator of the problem, usually a 

sparse matrix. Often called the "stiffness matrix". 

X : array_like 

Initial approximation to the k eigenvectors. If A has 

shape=(n,n) then X should have shape shape=(n,k). 

B : {dense matrix, sparse matrix, LinearOperator}, optional 

the right hand side operator in a generalized eigenproblem. 

by default, B = Identity 

often called the "mass matrix" 

M : {dense matrix, sparse matrix, LinearOperator}, optional 

preconditioner to A; by default M = Identity 

M should approximate the inverse of A 

Y : array_like, optional 

n-by-sizeY matrix of constraints, sizeY < n 

The iterations will be performed in the B-orthogonal complement 

of the column-space of Y. Y must be full rank. 

 

Returns 

------- 

w : array 

Array of k eigenvalues 

v : array 

An array of k eigenvectors. V has the same shape as X. 

 

Other Parameters 

---------------- 

tol : scalar, optional 

Solver tolerance (stopping criterion) 

by default: tol=n*sqrt(eps) 

maxiter : integer, optional 

maximum number of iterations 

by default: maxiter=min(n,20) 

largest : bool, optional 

when True, solve for the largest eigenvalues, otherwise the smallest 

verbosityLevel : integer, optional 

controls solver output. default: verbosityLevel = 0. 

retLambdaHistory : boolean, optional 

whether to return eigenvalue history 

retResidualNormsHistory : boolean, optional 

whether to return history of residual norms 

 

Examples 

-------- 

 

Solve A x = lambda B x with constraints and preconditioning. 

 

>>> from scipy.sparse import spdiags, issparse 

>>> from scipy.sparse.linalg import lobpcg, LinearOperator 

>>> n = 100 

>>> vals = [np.arange(n, dtype=np.float64) + 1] 

>>> A = spdiags(vals, 0, n, n) 

>>> A.toarray() 

array([[ 1., 0., 0., ..., 0., 0., 0.], 

[ 0., 2., 0., ..., 0., 0., 0.], 

[ 0., 0., 3., ..., 0., 0., 0.], 

..., 

[ 0., 0., 0., ..., 98., 0., 0.], 

[ 0., 0., 0., ..., 0., 99., 0.], 

[ 0., 0., 0., ..., 0., 0., 100.]]) 

 

Constraints. 

 

>>> Y = np.eye(n, 3) 

 

Initial guess for eigenvectors, should have linearly independent 

columns. Column dimension = number of requested eigenvalues. 

 

>>> X = np.random.rand(n, 3) 

 

Preconditioner -- inverse of A (as an abstract linear operator). 

 

>>> invA = spdiags([1./vals[0]], 0, n, n) 

>>> def precond( x ): 

... return invA * x 

>>> M = LinearOperator(matvec=precond, shape=(n, n), dtype=float) 

 

Here, ``invA`` could of course have been used directly as a preconditioner. 

Let us then solve the problem: 

 

>>> eigs, vecs = lobpcg(A, X, Y=Y, M=M, tol=1e-4, maxiter=40, largest=False) 

>>> eigs 

array([ 4., 5., 6.]) 

 

Note that the vectors passed in Y are the eigenvectors of the 3 smallest 

eigenvalues. The results returned are orthogonal to those. 

 

Notes 

----- 

If both retLambdaHistory and retResidualNormsHistory are True, 

the return tuple has the following format 

(lambda, V, lambda history, residual norms history). 
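
    For example, a usage sketch::

        lam, V, lambdaHist, resNormHist = lobpcg(
            A, X, retLambdaHistory=True, retResidualNormsHistory=True)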

    In the following, ``n`` denotes the matrix size and ``m`` the number
    of required eigenvalues (smallest or largest).

    The LOBPCG code internally solves eigenproblems of size ``3m`` on every
    iteration by calling the "standard" dense eigensolver, so if ``m`` is
    not small enough compared to ``n``, it does not make sense to call the
    LOBPCG code; one should rather use a "standard" eigensolver, e.g. a
    numpy or scipy function, in that case.
    If one calls the LOBPCG algorithm with ``5m > n``, it would most likely
    break internally, so the code calls the standard dense eigensolver
    instead in that case.

    It is not that ``n`` should be large for LOBPCG to work, but rather
    that the ratio ``n / m`` should be large.  If you call LOBPCG with
    ``m = 1`` and ``n = 10``, it works, though ``n`` is small.  The method
    is intended for extremely large ``n / m``; see, e.g., reference [28] in
    http://arxiv.org/abs/0705.2626

    The convergence speed depends basically on two factors:

    1. How well the sought eigenvalues are separated from the rest of the
       eigenvalues.  One can try to vary ``m`` to improve this.

    2. How well conditioned the problem is.  This can be changed by using
       proper preconditioning.  For example, a rod vibration test problem
       (under the tests directory) is ill-conditioned for large ``n``, so
       convergence will be slow unless efficient preconditioning is used.
       For this specific problem, a good simple preconditioner function
       would be a linear solve for A, which is easy to code since A is
       tridiagonal.

    *Acknowledgements*

    lobpcg.py code was written by Robert Cimrman.
    Many thanks belong to Andrew Knyazev, the author of the algorithm,
    for lots of advice and support.

    References
    ----------
    .. [1] A. V. Knyazev (2001),
           Toward the Optimal Preconditioned Eigensolver: Locally Optimal
           Block Preconditioned Conjugate Gradient Method.
           SIAM Journal on Scientific Computing 23, no. 2,
           pp. 517-541. http://dx.doi.org/10.1137/S1064827500366124

    .. [2] A. V. Knyazev, I. Lashuk, M. E. Argentati, and E. Ovchinnikov
           (2007), Block Locally Optimal Preconditioned Eigenvalue Xolvers
           (BLOPEX) in hypre and PETSc. http://arxiv.org/abs/0705.2626

    .. [3] A. V. Knyazev's C and MATLAB implementations:
           https://bitbucket.org/joseroman/blopex

    """
    blockVectorX = X
    blockVectorY = Y
    residualTolerance = tol
    maxIterations = maxiter

    if blockVectorY is not None:
        sizeY = blockVectorY.shape[1]
    else:
        sizeY = 0

    # Block size.
    if len(blockVectorX.shape) != 2:
        raise ValueError('expected rank-2 array for argument X')

    n, sizeX = blockVectorX.shape
    if sizeX > n:
        raise ValueError('X column dimension exceeds the row dimension')

    # _makeOperator maps a None input to an identity operator.
    A = _makeOperator(A, (n, n))
    B = _makeOperator(B, (n, n))
    M = _makeOperator(M, (n, n))

    if (n - sizeY) < (5 * sizeX):
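        # Heuristic guard matching the docstring note: LOBPCG needs the
        # ratio n / m to be large.  When the unconstrained problem size
        # n - sizeY falls below 5 * sizeX, use a dense eigensolver instead.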
        # warn('The problem size is small compared to the block size.'
        #      ' Using dense eigensolver instead of LOBPCG.')

        if blockVectorY is not None:
            raise NotImplementedError('The dense eigensolver '
                                      'does not support constraints.')

        # Define the closed range of indices of eigenvalues to return.
        if largest:
            eigvals = (n - sizeX, n - 1)
        else:
            eigvals = (0, sizeX - 1)

        A_dense = A(np.eye(n))
        B_dense = None if B is None else B(np.eye(n))
        return eigh(A_dense, B_dense, eigvals=eigvals, check_finite=False)

    if residualTolerance is None:
        # Default tolerance, cf. the docstring: tol = n * sqrt(eps),
        # with eps taken as 1e-15 here.
        residualTolerance = np.sqrt(1e-15) * n

    maxIterations = min(n, maxIterations)

    if verbosityLevel:
        aux = "Solving "
        if B is None:
            aux += "standard"
        else:
            aux += "generalized"
        aux += " eigenvalue problem with"
        if M is None:
            aux += "out"
        aux += " preconditioning\n\n"
        aux += "matrix size %d\n" % n
        aux += "block size %d\n\n" % sizeX
        if blockVectorY is None:
            aux += "No constraints\n\n"
        else:
            if sizeY > 1:
                aux += "%d constraints\n\n" % sizeY
            else:
                aux += "%d constraint\n\n" % sizeY
        print(aux)

    ##
    # Apply constraints to X.
    if blockVectorY is not None:

        if B is not None:
            blockVectorBY = B(blockVectorY)
        else:
            blockVectorBY = blockVectorY

        # gramYBY is a dense array.
        gramYBY = np.dot(blockVectorY.T.conj(), blockVectorBY)
        try:
            # gramYBY is a Cholesky factor from now on...
            gramYBY = cho_factor(gramYBY)
        except Exception:
            raise ValueError('cannot handle linearly dependent constraints')

        _applyConstraints(blockVectorX, gramYBY, blockVectorBY, blockVectorY)

    ##
    # B-orthonormalize X.
    blockVectorX, blockVectorBX = _b_orthonormalize(B, blockVectorX)

    ##
    # Compute the initial Ritz vectors: solve the eigenproblem.
    blockVectorAX = A(blockVectorX)
    gramXAX = np.dot(blockVectorX.T.conj(), blockVectorAX)

    _lambda, eigBlockVector = eigh(gramXAX, check_finite=False)
    ii = np.argsort(_lambda)[:sizeX]
    if largest:
        ii = ii[::-1]
    _lambda = _lambda[ii]

    eigBlockVector = np.asarray(eigBlockVector[:, ii])
    blockVectorX = np.dot(blockVectorX, eigBlockVector)
    blockVectorAX = np.dot(blockVectorAX, eigBlockVector)
    if B is not None:
        blockVectorBX = np.dot(blockVectorBX, eigBlockVector)

    ##
    # Active index set.
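    # Soft locking: activeMask tracks which of the sizeX eigenpairs are
    # still being iterated on; a column whose residual norm drops below
    # residualTolerance is removed from the active set in the loop below.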
    activeMask = np.ones((sizeX,), dtype=bool)

    lambdaHistory = [_lambda]
    residualNormsHistory = []

    previousBlockSize = sizeX
    ident = np.eye(sizeX, dtype=A.dtype)
    ident0 = np.eye(sizeX, dtype=A.dtype)

    ##
    # Main iteration loop.

    blockVectorP = None  # set during iteration
    blockVectorAP = None
    blockVectorBP = None

    for iterationNumber in xrange(maxIterations):
        if verbosityLevel > 0:
            print('iteration %d' % iterationNumber)
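
        # Block residuals: R = A X - B X * diag(_lambda).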
        aux = blockVectorBX * _lambda[np.newaxis, :]
        blockVectorR = blockVectorAX - aux

        aux = np.sum(blockVectorR.conjugate() * blockVectorR, 0)
        residualNorms = np.sqrt(aux)

        residualNormsHistory.append(residualNorms)

        ii = np.where(residualNorms > residualTolerance, True, False)
        activeMask = activeMask & ii
        if verbosityLevel > 2:
            print(activeMask)

        currentBlockSize = activeMask.sum()
        if currentBlockSize != previousBlockSize:
            previousBlockSize = currentBlockSize
            ident = np.eye(currentBlockSize, dtype=A.dtype)

        if currentBlockSize == 0:
            break

        if verbosityLevel > 0:
            print('current block size:', currentBlockSize)
            print('eigenvalue:', _lambda)
            print('residual norms:', residualNorms)
        if verbosityLevel > 10:
            print(eigBlockVector)

        activeBlockVectorR = as2d(blockVectorR[:, activeMask])

        if iterationNumber > 0:
            activeBlockVectorP = as2d(blockVectorP[:, activeMask])
            activeBlockVectorAP = as2d(blockVectorAP[:, activeMask])
            activeBlockVectorBP = as2d(blockVectorBP[:, activeMask])

        if M is not None:
            # Apply preconditioner T to the active residuals.
            activeBlockVectorR = M(activeBlockVectorR)

        ##
        # Apply constraints to the preconditioned residuals.
        if blockVectorY is not None:
            _applyConstraints(activeBlockVectorR,
                              gramYBY, blockVectorBY, blockVectorY)

        ##
        # B-orthonormalize the preconditioned residuals.
        aux = _b_orthonormalize(B, activeBlockVectorR)
        activeBlockVectorR, activeBlockVectorBR = aux

        activeBlockVectorAR = A(activeBlockVectorR)

        if iterationNumber > 0:
            aux = _b_orthonormalize(B, activeBlockVectorP,
                                    activeBlockVectorBP, retInvR=True)
            activeBlockVectorP, activeBlockVectorBP, invR = aux
            activeBlockVectorAP = np.dot(activeBlockVectorAP, invR)

        ##
        # Perform the Rayleigh-Ritz procedure:
        # Compute symmetric Gram matrices:
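        # The trial subspace is spanned by the blocks [X, W, P]: the
        # current iterates X, the preconditioned active residuals W, and
        # the conjugate directions P.  gramA and gramB are the projections
        # of A and B onto this subspace; since X and W are B-orthonormal,
        # the corresponding diagonal blocks of gramB are identities, and
        # the X-block of gramA is diag(_lambda).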
        xaw = np.dot(blockVectorX.T.conj(), activeBlockVectorAR)
        waw = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAR)
        xbw = np.dot(blockVectorX.T.conj(), activeBlockVectorBR)

        if iterationNumber > 0:
            xap = np.dot(blockVectorX.T.conj(), activeBlockVectorAP)
            wap = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorAP)
            pap = np.dot(activeBlockVectorP.T.conj(), activeBlockVectorAP)
            xbp = np.dot(blockVectorX.T.conj(), activeBlockVectorBP)
            wbp = np.dot(activeBlockVectorR.T.conj(), activeBlockVectorBP)

            gramA = np.bmat([[np.diag(_lambda), xaw, xap],
                             [xaw.T.conj(), waw, wap],
                             [xap.T.conj(), wap.T.conj(), pap]])

            gramB = np.bmat([[ident0, xbw, xbp],
                             [xbw.T.conj(), ident, wbp],
                             [xbp.T.conj(), wbp.T.conj(), ident]])
        else:
            gramA = np.bmat([[np.diag(_lambda), xaw],
                             [xaw.T.conj(), waw]])
            gramB = np.bmat([[ident0, xbw],
                             [xbw.T.conj(), ident]])

        _assert_symmetric(gramA)
        _assert_symmetric(gramB)

        if verbosityLevel > 10:
            save(gramA, 'gramA')
            save(gramB, 'gramB')

        # Solve the generalized eigenvalue problem.
        _lambda, eigBlockVector = eigh(gramA, gramB, check_finite=False)
        ii = np.argsort(_lambda)[:sizeX]
        if largest:
            ii = ii[::-1]
        if verbosityLevel > 10:
            print(ii)

        _lambda = _lambda[ii]
        eigBlockVector = eigBlockVector[:, ii]

        lambdaHistory.append(_lambda)

        if verbosityLevel > 10:
            print('lambda:', _lambda)
##        # Normalize eigenvectors!
##        aux = np.sum(eigBlockVector.conjugate() * eigBlockVector, 0)
##        eigVecNorms = np.sqrt(aux)
##        eigBlockVector = eigBlockVector / eigVecNorms[np.newaxis, :]
#        eigBlockVector, aux = _b_orthonormalize(B, eigBlockVector)

        if verbosityLevel > 10:
            print(eigBlockVector)
            pause()

        ##
        # Compute Ritz vectors.
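        # The updated block X is the Ritz approximation [X, W, P] * c,
        # with the eigenvector block c split by subspace.  The W and P
        # contributions (pp) become the next conjugate-direction block P,
        # and the A- and B-images are combined the same way so that no
        # extra operator applications are needed.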
        if iterationNumber > 0:
            eigBlockVectorX = eigBlockVector[:sizeX]
            eigBlockVectorR = eigBlockVector[sizeX:sizeX+currentBlockSize]
            eigBlockVectorP = eigBlockVector[sizeX+currentBlockSize:]

            pp = np.dot(activeBlockVectorR, eigBlockVectorR)
            pp += np.dot(activeBlockVectorP, eigBlockVectorP)

            app = np.dot(activeBlockVectorAR, eigBlockVectorR)
            app += np.dot(activeBlockVectorAP, eigBlockVectorP)

            bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)
            bpp += np.dot(activeBlockVectorBP, eigBlockVectorP)
        else:
            eigBlockVectorX = eigBlockVector[:sizeX]
            eigBlockVectorR = eigBlockVector[sizeX:]

            pp = np.dot(activeBlockVectorR, eigBlockVectorR)
            app = np.dot(activeBlockVectorAR, eigBlockVectorR)
            bpp = np.dot(activeBlockVectorBR, eigBlockVectorR)

        if verbosityLevel > 10:
            print(pp)
            print(app)
            print(bpp)
            pause()

        blockVectorX = np.dot(blockVectorX, eigBlockVectorX) + pp
        blockVectorAX = np.dot(blockVectorAX, eigBlockVectorX) + app
        blockVectorBX = np.dot(blockVectorBX, eigBlockVectorX) + bpp

        blockVectorP, blockVectorAP, blockVectorBP = pp, app, bpp

    # Compute the final residuals after the iteration has finished.
    aux = blockVectorBX * _lambda[np.newaxis, :]
    blockVectorR = blockVectorAX - aux

    aux = np.sum(blockVectorR.conjugate() * blockVectorR, 0)
    residualNorms = np.sqrt(aux)

    if verbosityLevel > 0:
        print('final eigenvalue:', _lambda)
        print('final residual norms:', residualNorms)

    if retLambdaHistory:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, lambdaHistory, residualNormsHistory
        else:
            return _lambda, blockVectorX, lambdaHistory
    else:
        if retResidualNormsHistory:
            return _lambda, blockVectorX, residualNormsHistory
        else:
            return _lambda, blockVectorX