from __future__ import division, print_function, absolute_import 

 

import math 

import warnings 

from collections import namedtuple 

 

import numpy as np 

from numpy import (isscalar, r_, log, around, unique, asarray, 

zeros, arange, sort, amin, amax, any, atleast_1d, 

sqrt, ceil, floor, array, compress, 

pi, exp, ravel, count_nonzero, sin, cos, arctan2, hypot) 

 

from scipy._lib.six import string_types 

from scipy import optimize 

from scipy import special 

from . import statlib 

from . import stats 

from .stats import find_repeats, _contains_nan 

from .contingency import chi2_contingency 

from . import distributions 

from ._distn_infrastructure import rv_generic 

 

 

__all__ = ['mvsdist', 

'bayes_mvs', 'kstat', 'kstatvar', 'probplot', 'ppcc_max', 'ppcc_plot', 

'boxcox_llf', 'boxcox', 'boxcox_normmax', 'boxcox_normplot', 

'shapiro', 'anderson', 'ansari', 'bartlett', 'levene', 'binom_test', 

'fligner', 'mood', 'wilcoxon', 'median_test', 

'circmean', 'circvar', 'circstd', 'anderson_ksamp' 

] 

 

 

Mean = namedtuple('Mean', ('statistic', 'minmax')) 

Variance = namedtuple('Variance', ('statistic', 'minmax')) 

Std_dev = namedtuple('Std_dev', ('statistic', 'minmax')) 

 

 

def bayes_mvs(data, alpha=0.90): 

r""" 

Bayesian confidence intervals for the mean, var, and std. 

 

Parameters 

---------- 

data : array_like 

Input data; if multi-dimensional, it is flattened to 1-D by `bayes_mvs`.

Requires 2 or more data points. 

alpha : float, optional 

Probability that the returned confidence interval contains 

the true parameter. 

 

Returns 

------- 

mean_cntr, var_cntr, std_cntr : tuple 

The three results are for the mean, variance and standard deviation, 

respectively. Each result is a tuple of the form:: 

 

(center, (lower, upper)) 

 

with `center` the mean of the conditional pdf of the value given the 

data, and `(lower, upper)` a confidence interval, centered on the 

median, containing the estimate to a probability ``alpha``. 

 

See Also 

-------- 

mvsdist 

 

Notes 

----- 

Each tuple of mean, variance, and standard deviation estimates represents

the (center, (lower, upper)) with center the mean of the conditional pdf 

of the value given the data and (lower, upper) is a confidence interval 

centered on the median, containing the estimate to a probability 

``alpha``. 

 

Converts data to 1-D and assumes all data has the same mean and variance. 

Uses the Jeffreys prior for variance and std.

 

Equivalent to ``tuple((x.mean(), x.interval(alpha)) for x in mvsdist(dat))`` 

 

References 

---------- 

T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and 

standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278, 

2006. 

 

Examples 

-------- 

First a basic example to demonstrate the outputs: 

 

>>> from scipy import stats 

>>> data = [6, 9, 12, 7, 8, 8, 13] 

>>> mean, var, std = stats.bayes_mvs(data) 

>>> mean 

Mean(statistic=9.0, minmax=(7.103650222612533, 10.896349777387467)) 

>>> var 

Variance(statistic=10.0, minmax=(3.176724206..., 24.45910382...)) 

>>> std 

Std_dev(statistic=2.9724954732045084, minmax=(1.7823367265645143, 4.945614605014631)) 

 

Now we generate some normally distributed random data, and get estimates of 

mean and standard deviation with 95% confidence intervals for those 

estimates: 

 

>>> n_samples = 100000 

>>> data = stats.norm.rvs(size=n_samples) 

>>> res_mean, res_var, res_std = stats.bayes_mvs(data, alpha=0.95) 

 

>>> import matplotlib.pyplot as plt 

>>> fig = plt.figure() 

>>> ax = fig.add_subplot(111) 

>>> ax.hist(data, bins=100, density=True, label='Histogram of data') 

>>> ax.vlines(res_mean.statistic, 0, 0.5, colors='r', label='Estimated mean') 

>>> ax.axvspan(res_mean.minmax[0],res_mean.minmax[1], facecolor='r', 

... alpha=0.2, label=r'Estimated mean (95% limits)') 

>>> ax.vlines(res_std.statistic, 0, 0.5, colors='g', label='Estimated scale') 

>>> ax.axvspan(res_std.minmax[0],res_std.minmax[1], facecolor='g', alpha=0.2, 

... label=r'Estimated scale (95% limits)') 

 

>>> ax.legend(fontsize=10) 

>>> ax.set_xlim([-4, 4]) 

>>> ax.set_ylim([0, 0.5]) 

>>> plt.show() 

 

""" 

m, v, s = mvsdist(data) 

if alpha >= 1 or alpha <= 0: 

raise ValueError("0 < alpha < 1 is required, but alpha=%s was given." 

% alpha) 

 

m_res = Mean(m.mean(), m.interval(alpha)) 

v_res = Variance(v.mean(), v.interval(alpha)) 

s_res = Std_dev(s.mean(), s.interval(alpha)) 

 

return m_res, v_res, s_res 
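
# A minimal illustrative sketch (hypothetical helper, not part of the
# library): checks the equivalence stated in the docstring, namely that
# ``bayes_mvs(data, alpha)`` returns the same numbers as querying the
# three frozen distributions from ``mvsdist``.
def _demo_bayes_mvs_equivalence():
    data = [6, 9, 12, 7, 8, 8, 13]
    via_bayes = bayes_mvs(data, alpha=0.9)
    via_mvsdist = tuple((d.mean(), d.interval(0.9)) for d in mvsdist(data))
    for res, (center, interval) in zip(via_bayes, via_mvsdist):
        assert np.allclose(res.statistic, center)
        assert np.allclose(res.minmax, interval)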

 

 

def mvsdist(data): 

""" 

'Frozen' distributions for mean, variance, and standard deviation of data. 

 

Parameters 

---------- 

data : array_like 

Input array. Converted to 1-D using ravel. 

Requires 2 or more data-points. 

 

Returns 

------- 

mdist : "frozen" distribution object 

Distribution object representing the mean of the data 

vdist : "frozen" distribution object 

Distribution object representing the variance of the data 

sdist : "frozen" distribution object 

Distribution object representing the standard deviation of the data 

 

See Also 

-------- 

bayes_mvs 

 

Notes 

----- 

The return values from ``bayes_mvs(data)`` are equivalent to

``tuple((x.mean(), x.interval(0.90)) for x in mvsdist(data))``. 

 

In other words, calling ``<dist>.mean()`` and ``<dist>.interval(0.90)`` 

on the three distribution objects returned from this function will give 

the same results that are returned from `bayes_mvs`. 

 

References 

---------- 

T.E. Oliphant, "A Bayesian perspective on estimating mean, variance, and 

standard-deviation from data", http://scholarsarchive.byu.edu/facpub/278, 

2006. 

 

Examples 

-------- 

>>> from scipy import stats 

>>> data = [6, 9, 12, 7, 8, 8, 13] 

>>> mean, var, std = stats.mvsdist(data) 

 

We now have frozen distribution objects "mean", "var" and "std" that we can 

examine: 

 

>>> mean.mean() 

9.0 

>>> mean.interval(0.95) 

(6.6120585482655692, 11.387941451734431) 

>>> mean.std() 

1.1952286093343936 

 

""" 

x = ravel(data) 

n = len(x) 

if n < 2: 

raise ValueError("Need at least 2 data-points.") 

xbar = x.mean() 

C = x.var() 

if n > 1000: # gaussian approximations for large n 

mdist = distributions.norm(loc=xbar, scale=math.sqrt(C / n)) 

sdist = distributions.norm(loc=math.sqrt(C), scale=math.sqrt(C / (2. * n))) 

vdist = distributions.norm(loc=C, scale=math.sqrt(2.0 / n) * C) 

else: 

nm1 = n - 1 

fac = n * C / 2. 

val = nm1 / 2. 

mdist = distributions.t(nm1, loc=xbar, scale=math.sqrt(C / nm1)) 

sdist = distributions.gengamma(val, -2, scale=math.sqrt(fac)) 

vdist = distributions.invgamma(val, scale=fac) 

return mdist, vdist, sdist 
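
# Illustrative sketch (hypothetical helper): for n > 1000 the code above
# switches to Gaussian approximations, so the "mean" distribution should
# be norm(xbar, sqrt(C/n)) built from the sample mean and variance.
def _demo_mvsdist_large_n():
    rng = np.random.RandomState(0)
    x = rng.normal(loc=3.0, scale=2.0, size=2000)
    mdist, vdist, sdist = mvsdist(x)
    assert np.allclose(mdist.mean(), x.mean())
    assert np.allclose(mdist.std(), np.sqrt(x.var() / len(x)))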

 

 

def kstat(data, n=2): 

r""" 

Return the nth k-statistic (1<=n<=4 so far). 

 

The nth k-statistic k_n is the unique symmetric unbiased estimator of the 

nth cumulant kappa_n. 

 

Parameters 

---------- 

data : array_like 

Input array. Note that n-D input gets flattened. 

n : int, {1, 2, 3, 4}, optional 

Default is equal to 2. 

 

Returns 

------- 

kstat : float 

The nth k-statistic. 

 

See Also 

-------- 

kstatvar: Returns an unbiased estimator of the variance of the k-statistic. 

moment: Returns the n-th central moment about the mean for a sample. 

 

Notes 

----- 

For a sample size n, the first few k-statistics are given by: 

 

.. math:: 

 

k_{1} = \mu 

k_{2} = \frac{n}{n-1} m_{2} 

k_{3} = \frac{ n^{2} } {(n-1) (n-2)} m_{3} 

k_{4} = \frac{ n^{2} [(n + 1)m_{4} - 3(n - 1) m^2_{2}]} {(n-1) (n-2) (n-3)} 

 

where :math:`\mu` is the sample mean, :math:`m_2` is the sample 

variance, and :math:`m_i` is the i-th sample central moment. 

 

References 

---------- 

http://mathworld.wolfram.com/k-Statistic.html 

 

http://mathworld.wolfram.com/Cumulant.html 

 

Examples 

-------- 

>>> from scipy import stats 

>>> rndm = np.random.RandomState(1234) 

 

As sample size increases, the n-th moment and the n-th k-statistic converge to the

same number (although they aren't identical). In the case of the normal 

distribution, they converge to zero. 

 

>>> for n in [2, 3, 4, 5, 6, 7]: 

... x = rndm.normal(size=10**n) 

... m, k = stats.moment(x, 3), stats.kstat(x, 3) 

... print("%.3g %.3g %.3g" % (m, k, m-k)) 

-0.631 -0.651 0.0194 

0.0282 0.0283 -8.49e-05 

-0.0454 -0.0454 1.36e-05 

7.53e-05 7.53e-05 -2.26e-09 

0.00166 0.00166 -4.99e-09 

-2.88e-06 -2.88e-06 8.63e-13 

""" 

if n > 4 or n < 1: 

raise ValueError("k-statistics only supported for 1<=n<=4") 

n = int(n) 

S = np.zeros(n + 1, np.float64) 

data = ravel(data) 

N = data.size 

 

# raise ValueError on empty input 

if N == 0: 

raise ValueError("Data input must not be empty") 

 

# on nan input, return nan without warning 

if np.isnan(np.sum(data)): 

return np.nan 

 

for k in range(1, n + 1): 

S[k] = np.sum(data**k, axis=0) 

if n == 1: 

return S[1] * 1.0/N 

elif n == 2: 

return (N*S[2] - S[1]**2.0) / (N*(N - 1.0)) 

elif n == 3: 

return (2*S[1]**3 - 3*N*S[1]*S[2] + N*N*S[3]) / (N*(N - 1.0)*(N - 2.0)) 

elif n == 4: 

return ((-6*S[1]**4 + 12*N*S[1]**2 * S[2] - 3*N*(N-1.0)*S[2]**2 - 

4*N*(N+1)*S[1]*S[3] + N*N*(N+1)*S[4]) / 

(N*(N-1.0)*(N-2.0)*(N-3.0))) 

else: 

raise ValueError("Should not be here.") 

 

 

def kstatvar(data, n=2): 

r""" 

Returns an unbiased estimator of the variance of the k-statistic. 

 

See `kstat` for more details of the k-statistic. 

 

Parameters 

---------- 

data : array_like 

Input array. Note that n-D input gets flattened. 

n : int, {1, 2}, optional 

Default is equal to 2. 

 

Returns 

------- 

kstatvar : float 

The nth k-statistic variance. 

 

See Also 

-------- 

kstat: Returns the n-th k-statistic. 

moment: Returns the n-th central moment about the mean for a sample. 

 

Notes 

----- 

The variances of the first few k-statistics are given by: 

 

.. math:: 

 

var(k_{1}) = \frac{\kappa^2}{n} 

var(k_{2}) = \frac{\kappa^4}{n} + \frac{2\kappa^2_{2}}{n - 1} 

var(k_{3}) = \frac{\kappa^6}{n} + \frac{9 \kappa_2 \kappa_4}{n - 1} + 

\frac{9 \kappa^2_{3}}{n - 1} + 

\frac{6 n \kappa^3_{2}}{(n-1) (n-2)} 

var(k_{4}) = \frac{\kappa^8}{n} + \frac{16 \kappa_2 \kappa_6}{n - 1} + 

\frac{48 \kappa_{3} \kappa_5}{n - 1} + 

\frac{34 \kappa^2_{4}}{n-1} + \frac{72 n \kappa^2_{2} \kappa_4}{(n - 1) (n - 2)} + 

\frac{144 n \kappa_{2} \kappa^2_{3}}{(n - 1) (n - 2)} + 

\frac{24 (n + 1) n \kappa^4_{2}}{(n - 1) (n - 2) (n - 3)} 

""" 

data = ravel(data) 

N = len(data) 

if n == 1: 

return kstat(data, n=2) * 1.0/N 

elif n == 2: 

k2 = kstat(data, n=2) 

k4 = kstat(data, n=4) 

return (2*N*k2**2 + (N-1)*k4) / (N*(N+1)) 

else: 

raise ValueError("Only n=1 or n=2 supported.") 

 

 

def _calc_uniform_order_statistic_medians(n): 

""" 

Approximations of uniform order statistic medians. 

 

Parameters 

---------- 

n : int 

Sample size. 

 

Returns 

------- 

v : 1d float array 

Approximations of the order statistic medians. 

 

References 

---------- 

.. [1] James J. Filliben, "The Probability Plot Correlation Coefficient 

Test for Normality", Technometrics, Vol. 17, pp. 111-117, 1975. 

 

Examples 

-------- 

Order statistics of the uniform distribution on the unit interval 

are marginally distributed according to beta distributions. 

The expectations of these order statistics are evenly spaced across

the interval, but the distributions are skewed in a way that 

pushes the medians slightly towards the endpoints of the unit interval: 

 

>>> n = 4 

>>> k = np.arange(1, n+1) 

>>> from scipy.stats import beta 

>>> a = k 

>>> b = n-k+1 

>>> beta.mean(a, b) 

array([ 0.2, 0.4, 0.6, 0.8]) 

>>> beta.median(a, b) 

array([ 0.15910358, 0.38572757, 0.61427243, 0.84089642]) 

 

The Filliben approximation uses the exact medians of the smallest 

and greatest order statistics, and the remaining medians are approximated 

by points spread evenly across a sub-interval of the unit interval: 

 

>>> from scipy.stats.morestats import _calc_uniform_order_statistic_medians

>>> _calc_uniform_order_statistic_medians(n) 

array([ 0.15910358, 0.38545246, 0.61454754, 0.84089642]) 

 

This plot shows the skewed distributions of the order statistics 

of a sample of size four from a uniform distribution on the unit interval: 

 

>>> import matplotlib.pyplot as plt 

>>> x = np.linspace(0.0, 1.0, num=50, endpoint=True) 

>>> pdfs = [beta.pdf(x, a[i], b[i]) for i in range(n)] 

>>> plt.figure() 

>>> plt.plot(x, pdfs[0], x, pdfs[1], x, pdfs[2], x, pdfs[3]) 

 

""" 

v = np.zeros(n, dtype=np.float64) 

v[-1] = 0.5**(1.0 / n) 

v[0] = 1 - v[-1] 

i = np.arange(2, n) 

v[1:-1] = (i - 0.3175) / (n + 0.365) 

return v 

 

 

def _parse_dist_kw(dist, enforce_subclass=True): 

"""Parse `dist` keyword. 

 

Parameters 

---------- 

dist : str or stats.distributions instance. 

Several functions take `dist` as a keyword, hence this utility 

function. 

enforce_subclass : bool, optional 

If True (default), `dist` needs to be a 

`_distn_infrastructure.rv_generic` instance. 

It can sometimes be useful to set this keyword to False, if a function 

wants to accept objects that just look somewhat like such an instance 

(for example, they have a ``ppf`` method). 

 

""" 

if isinstance(dist, rv_generic): 

pass 

elif isinstance(dist, string_types): 

try: 

dist = getattr(distributions, dist) 

except AttributeError: 

raise ValueError("%s is not a valid distribution name" % dist) 

elif enforce_subclass: 

msg = ("`dist` should be a stats.distributions instance or a string " 

"with the name of such a distribution.") 

raise ValueError(msg) 

 

return dist 
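
# Quick illustrative use of the helper above (hypothetical demo): a
# distribution name resolves to the matching object in `distributions`,
# and an rv_generic instance passes through unchanged.
def _demo_parse_dist_kw():
    assert _parse_dist_kw('norm') is distributions.norm
    assert _parse_dist_kw(distributions.norm) is distributions.norm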

 

 

def _add_axis_labels_title(plot, xlabel, ylabel, title): 

"""Helper function to add axes labels and a title to stats plots""" 

try: 

if hasattr(plot, 'set_title'): 

# Matplotlib Axes instance or something that looks like it 

plot.set_title(title) 

plot.set_xlabel(xlabel) 

plot.set_ylabel(ylabel) 

else: 

# matplotlib.pyplot module 

plot.title(title) 

plot.xlabel(xlabel) 

plot.ylabel(ylabel) 

except Exception:

# Not an MPL object or something that looks (enough) like it. 

# Don't crash on adding labels or title 

pass 

 

 

def probplot(x, sparams=(), dist='norm', fit=True, plot=None, rvalue=False): 

""" 

Calculate quantiles for a probability plot, and optionally show the plot. 

 

Generates a probability plot of sample data against the quantiles of a 

specified theoretical distribution (the normal distribution by default). 

`probplot` optionally calculates a best-fit line for the data and plots the 

results using Matplotlib or a given plot function. 

 

Parameters 

---------- 

x : array_like 

Sample/response data from which `probplot` creates the plot. 

sparams : tuple, optional 

Distribution-specific shape parameters (shape parameters plus location 

and scale). 

dist : str or stats.distributions instance, optional 

Distribution or distribution function name. The default is 'norm' for a 

normal probability plot. Objects that look enough like a 

stats.distributions instance (i.e. they have a ``ppf`` method) are also 

accepted. 

fit : bool, optional 

Fit a least-squares regression (best-fit) line to the sample data if 

True (default). 

plot : object, optional 

If given, plots the quantiles and least squares fit. 

`plot` is an object that has to have methods "plot" and "text". 

The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, 

or a custom object with the same methods. 

Default is None, which means that no plot is created. 

 

Returns 

------- 

(osm, osr) : tuple of ndarrays 

Tuple of theoretical quantiles (osm, or order statistic medians) and 

ordered responses (osr). `osr` is simply sorted input `x`. 

For details on how `osm` is calculated see the Notes section. 

(slope, intercept, r) : tuple of floats, optional 

Tuple containing the result of the least-squares fit, if that is 

performed by `probplot`. `r` is the square root of the coefficient of 

determination. If ``fit=False`` and ``plot=None``, this tuple is not 

returned. 

 

Notes 

----- 

Even if `plot` is given, the figure is not shown or saved by `probplot`; 

``plt.show()`` or ``plt.savefig('figname.png')`` should be used after 

calling `probplot`. 

 

`probplot` generates a probability plot, which should not be confused with 

a Q-Q or a P-P plot. Statsmodels has more extensive functionality of this 

type, see ``statsmodels.api.ProbPlot``. 

 

The formula used for the theoretical quantiles (horizontal axis of the 

probability plot) is Filliben's estimate:: 

 

quantiles = dist.ppf(val), for 

 

        0.5**(1/n),                  for i = n

  val = (i - 0.3175) / (n + 0.365),  for i = 2, ..., n-1

        1 - 0.5**(1/n),              for i = 1

 

where ``i`` indicates the i-th ordered value and ``n`` is the total number 

of values. 

 

Examples 

-------- 

>>> from scipy import stats 

>>> import matplotlib.pyplot as plt 

>>> nsample = 100 

>>> np.random.seed(7654321) 

 

A t distribution with small degrees of freedom: 

 

>>> ax1 = plt.subplot(221) 

>>> x = stats.t.rvs(3, size=nsample) 

>>> res = stats.probplot(x, plot=plt) 

 

A t distribution with larger degrees of freedom: 

 

>>> ax2 = plt.subplot(222) 

>>> x = stats.t.rvs(25, size=nsample) 

>>> res = stats.probplot(x, plot=plt) 

 

A mixture of two normal distributions with broadcasting: 

 

>>> ax3 = plt.subplot(223) 

>>> x = stats.norm.rvs(loc=[0,5], scale=[1,1.5], 

... size=(nsample//2,2)).ravel() 

>>> res = stats.probplot(x, plot=plt) 

 

A standard normal distribution: 

 

>>> ax4 = plt.subplot(224) 

>>> x = stats.norm.rvs(loc=0, scale=1, size=nsample) 

>>> res = stats.probplot(x, plot=plt) 

 

Produce a new figure with a loggamma distribution, using the ``dist`` and 

``sparams`` keywords: 

 

>>> fig = plt.figure() 

>>> ax = fig.add_subplot(111) 

>>> x = stats.loggamma.rvs(c=2.5, size=500) 

>>> res = stats.probplot(x, dist=stats.loggamma, sparams=(2.5,), plot=ax) 

>>> ax.set_title("Probplot for loggamma dist with shape parameter 2.5") 

 

Show the results with Matplotlib: 

 

>>> plt.show() 

 

""" 

x = np.asarray(x) 

_perform_fit = fit or (plot is not None) 

if x.size == 0: 

if _perform_fit: 

return (x, x), (np.nan, np.nan, 0.0) 

else: 

return x, x 

 

osm_uniform = _calc_uniform_order_statistic_medians(len(x)) 

dist = _parse_dist_kw(dist, enforce_subclass=False) 

if sparams is None: 

sparams = () 

if isscalar(sparams): 

sparams = (sparams,) 

if not isinstance(sparams, tuple): 

sparams = tuple(sparams) 

 

osm = dist.ppf(osm_uniform, *sparams) 

osr = sort(x) 

if _perform_fit: 

# perform a linear least squares fit. 

slope, intercept, r, prob, sterrest = stats.linregress(osm, osr) 

 

if plot is not None: 

plot.plot(osm, osr, 'bo', osm, slope*osm + intercept, 'r-') 

_add_axis_labels_title(plot, xlabel='Theoretical quantiles', 

ylabel='Ordered Values', 

title='Probability Plot') 

 

# Add R^2 value to the plot as text 

if rvalue: 

xmin = amin(osm) 

xmax = amax(osm) 

ymin = amin(x) 

ymax = amax(x) 

posx = xmin + 0.70 * (xmax - xmin) 

posy = ymin + 0.01 * (ymax - ymin) 

plot.text(posx, posy, "$R^2=%1.4f$" % r**2) 

 

if fit: 

return (osm, osr), (slope, intercept, r) 

else: 

return osm, osr 
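
# Illustrative sketch (hypothetical helper): reproduce the theoretical
# quantiles returned by ``probplot`` by applying Filliben's estimate
# from the Notes section by hand.
def _demo_probplot_filliben_positions():
    rng = np.random.RandomState(7654321)
    x = rng.normal(size=25)
    osm, osr = probplot(x, fit=False)
    n = len(x)
    i = np.arange(1, n + 1, dtype=float)
    val = (i - 0.3175) / (n + 0.365)
    val[-1] = 0.5 ** (1.0 / n)
    val[0] = 1 - val[-1]
    assert np.allclose(osm, distributions.norm.ppf(val))
    assert np.allclose(osr, np.sort(x))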

 

 

def ppcc_max(x, brack=(0.0, 1.0), dist='tukeylambda'): 

""" 

Calculate the shape parameter that maximizes the PPCC 

 

The probability plot correlation coefficient (PPCC) plot can be used to 

determine the optimal shape parameter for a one-parameter family of 

distributions. ppcc_max returns the shape parameter that maximizes the

probability plot correlation coefficient of the given data against a

one-parameter family of distributions.

 

Parameters 

---------- 

x : array_like 

Input array. 

brack : tuple, optional 

Triple (a,b,c) where (a<b<c). If bracket consists of two numbers (a, c) 

then they are assumed to be a starting interval for a downhill bracket 

search (see `scipy.optimize.brent`). 

dist : str or stats.distributions instance, optional 

Distribution or distribution function name. Objects that look enough 

like a stats.distributions instance (i.e. they have a ``ppf`` method) 

are also accepted. The default is ``'tukeylambda'``. 

 

Returns 

------- 

shape_value : float 

The shape parameter at which the probability plot correlation 

coefficient reaches its max value. 

 

See also 

-------- 

ppcc_plot, probplot, boxcox 

 

Notes 

----- 

The `brack` keyword serves as a starting point, which is useful in corner

cases. One can use a plot to obtain a rough visual estimate of the location

of the maximum and start the search near it.

 

References 

---------- 

.. [1] J.J. Filliben, "The Probability Plot Correlation Coefficient Test for 

Normality", Technometrics, Vol. 17, pp. 111-117, 1975. 

 

.. [2] http://www.itl.nist.gov/div898/handbook/eda/section3/ppccplot.htm 

 

Examples 

-------- 

First we generate some random data from a Tukey-Lambda distribution, 

with shape parameter -0.7: 

 

>>> from scipy import stats 

>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000, 

... random_state=1234567) + 1e4 

 

Now we explore this data with a PPCC plot as well as the related 

probability plot and Box-Cox normplot. A red line is drawn where we 

expect the PPCC value to be maximal (at the shape parameter -0.7 used 

above): 

 

>>> import matplotlib.pyplot as plt 

>>> fig = plt.figure(figsize=(8, 6)) 

>>> ax = fig.add_subplot(111) 

>>> res = stats.ppcc_plot(x, -5, 5, plot=ax) 

 

We calculate the value where the shape should reach its maximum and a red 

line is drawn there. The line should coincide with the highest point in the 

ppcc_plot. 

 

>>> cmax = stats.ppcc_max(x)

>>> ax.vlines(cmax, 0, 1, colors='r', label='Expected shape value')

 

>>> plt.show() 

 

""" 

dist = _parse_dist_kw(dist) 

osm_uniform = _calc_uniform_order_statistic_medians(len(x)) 

osr = sort(x) 

 

# this function computes the x-axis values of the probability plot 

# and computes a linear regression (including the correlation) 

# and returns 1-r so that a minimization function maximizes the 

# correlation 

def tempfunc(shape, mi, yvals, func): 

xvals = func(mi, shape) 

r, prob = stats.pearsonr(xvals, yvals) 

return 1 - r 

 

return optimize.brent(tempfunc, brack=brack, args=(osm_uniform, osr, dist.ppf)) 

 

 

def ppcc_plot(x, a, b, dist='tukeylambda', plot=None, N=80): 

""" 

Calculate and optionally plot probability plot correlation coefficient. 

 

The probability plot correlation coefficient (PPCC) plot can be used to 

determine the optimal shape parameter for a one-parameter family of 

distributions. It cannot be used for distributions without shape parameters 

(like the normal distribution) or with multiple shape parameters. 

 

By default a Tukey-Lambda distribution (`stats.tukeylambda`) is used. A 

Tukey-Lambda PPCC plot interpolates from long-tailed to short-tailed 

distributions via an approximately normal one, and is therefore particularly 

useful in practice. 

 

Parameters 

---------- 

x : array_like 

Input array. 

a, b : scalar

Lower and upper bounds of the shape parameter to use. 

dist : str or stats.distributions instance, optional 

Distribution or distribution function name. Objects that look enough 

like a stats.distributions instance (i.e. they have a ``ppf`` method) 

are also accepted. The default is ``'tukeylambda'``. 

plot : object, optional 

If given, plots PPCC against the shape parameter. 

`plot` is an object that has to have methods "plot" and "text". 

The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, 

or a custom object with the same methods. 

Default is None, which means that no plot is created. 

N : int, optional 

Number of points on the horizontal axis (equally distributed from 

`a` to `b`). 

 

Returns 

------- 

svals : ndarray 

The shape values for which `ppcc` was calculated. 

ppcc : ndarray 

The calculated probability plot correlation coefficient values. 

 

See also 

-------- 

ppcc_max, probplot, boxcox_normplot, tukeylambda 

 

References 

---------- 

J.J. Filliben, "The Probability Plot Correlation Coefficient Test for 

Normality", Technometrics, Vol. 17, pp. 111-117, 1975. 

 

Examples 

-------- 

First we generate some random data from a Tukey-Lambda distribution, 

with shape parameter -0.7: 

 

>>> from scipy import stats 

>>> import matplotlib.pyplot as plt 

>>> np.random.seed(1234567) 

>>> x = stats.tukeylambda.rvs(-0.7, loc=2, scale=0.5, size=10000) + 1e4 

 

Now we explore this data with a PPCC plot as well as the related 

probability plot and Box-Cox normplot. A red line is drawn where we 

expect the PPCC value to be maximal (at the shape parameter -0.7 used 

above): 

 

>>> fig = plt.figure(figsize=(12, 4)) 

>>> ax1 = fig.add_subplot(131) 

>>> ax2 = fig.add_subplot(132) 

>>> ax3 = fig.add_subplot(133) 

>>> res = stats.probplot(x, plot=ax1) 

>>> res = stats.boxcox_normplot(x, -5, 5, plot=ax2) 

>>> res = stats.ppcc_plot(x, -5, 5, plot=ax3) 

>>> ax3.vlines(-0.7, 0, 1, colors='r', label='Expected shape value') 

>>> plt.show() 

 

""" 

if b <= a: 

raise ValueError("`b` has to be larger than `a`.") 

 

svals = np.linspace(a, b, num=N) 

ppcc = np.empty_like(svals) 

for k, sval in enumerate(svals): 

_, r2 = probplot(x, sval, dist=dist, fit=True) 

ppcc[k] = r2[-1] 

 

if plot is not None: 

plot.plot(svals, ppcc, 'x') 

_add_axis_labels_title(plot, xlabel='Shape Values', 

ylabel='Prob Plot Corr. Coef.', 

title='(%s) PPCC Plot' % dist) 

 

return svals, ppcc 

 

 

def boxcox_llf(lmb, data): 

r"""The boxcox log-likelihood function. 

 

Parameters 

---------- 

lmb : scalar 

Parameter for Box-Cox transformation. See `boxcox` for details. 

data : array_like 

Data to calculate Box-Cox log-likelihood for. If `data` is 

multi-dimensional, the log-likelihood is calculated along the first 

axis. 

 

Returns 

------- 

llf : float or ndarray 

Box-Cox log-likelihood of `data` given `lmb`. A float for 1-D `data`, 

an array otherwise. 

 

See Also 

-------- 

boxcox, probplot, boxcox_normplot, boxcox_normmax 

 

Notes 

----- 

The Box-Cox log-likelihood function is defined here as 

 

.. math:: 

 

llf = (\lambda - 1) \sum_i(\log(x_i)) - 

N/2 \log(\sum_i (y_i - \bar{y})^2 / N), 

 

where ``y`` is the Box-Cox transformed input data ``x``. 

 

Examples 

-------- 

>>> from scipy import stats 

>>> import matplotlib.pyplot as plt 

>>> from mpl_toolkits.axes_grid1.inset_locator import inset_axes 

>>> np.random.seed(1245) 

 

Generate some random variates and calculate Box-Cox log-likelihood values 

for them for a range of ``lmbda`` values: 

 

>>> x = stats.loggamma.rvs(5, loc=10, size=1000) 

>>> lmbdas = np.linspace(-2, 10) 

>>> llf = np.zeros(lmbdas.shape, dtype=float) 

>>> for ii, lmbda in enumerate(lmbdas): 

... llf[ii] = stats.boxcox_llf(lmbda, x) 

 

Also find the optimal lmbda value with `boxcox`: 

 

>>> x_most_normal, lmbda_optimal = stats.boxcox(x) 

 

Plot the log-likelihood as function of lmbda. Add the optimal lmbda as a 

horizontal line to check that that's really the optimum: 

 

>>> fig = plt.figure() 

>>> ax = fig.add_subplot(111) 

>>> ax.plot(lmbdas, llf, 'b.-') 

>>> ax.axhline(stats.boxcox_llf(lmbda_optimal, x), color='r') 

>>> ax.set_xlabel('lmbda parameter') 

>>> ax.set_ylabel('Box-Cox log-likelihood') 

 

Now add some probability plots to show that where the log-likelihood is 

maximized the data transformed with `boxcox` looks closest to normal: 

 

>>> locs = [3, 10, 4] # 'lower left', 'center', 'lower right' 

>>> for lmbda, loc in zip([-1, lmbda_optimal, 9], locs): 

... xt = stats.boxcox(x, lmbda=lmbda) 

... (osm, osr), (slope, intercept, r_sq) = stats.probplot(xt) 

... ax_inset = inset_axes(ax, width="20%", height="20%", loc=loc) 

... ax_inset.plot(osm, osr, 'c.', osm, slope*osm + intercept, 'k-') 

... ax_inset.set_xticklabels([]) 

... ax_inset.set_yticklabels([]) 

... ax_inset.set_title(r'$\lambda=%1.2f$' % lmbda)

 

>>> plt.show() 

 

""" 

data = np.asarray(data) 

N = data.shape[0] 

if N == 0: 

return np.nan 

 

y = boxcox(data, lmb) 

y_mean = np.mean(y, axis=0) 

llf = (lmb - 1) * np.sum(np.log(data), axis=0) 

llf -= N / 2.0 * np.log(np.sum((y - y_mean)**2. / N, axis=0)) 

return llf 
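
# Illustrative sketch (hypothetical helper): evaluate the Notes formula
# for the Box-Cox log-likelihood directly with NumPy and compare it
# against ``boxcox_llf``.
def _demo_boxcox_llf_formula():
    rng = np.random.RandomState(1245)
    x = rng.gamma(shape=2.0, scale=3.0, size=200)
    lmb = 0.5
    y = (x ** lmb - 1) / lmb  # the Box-Cox transform for lmb != 0
    n = len(x)
    direct = (lmb - 1) * np.sum(np.log(x)) - n / 2.0 * np.log(np.var(y))
    assert np.allclose(boxcox_llf(lmb, x), direct)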

 

 

def _boxcox_conf_interval(x, lmax, alpha): 

# Need to find the lambda for which 

# f(x,lmbda) >= f(x,lmax) - 0.5*chi^2_alpha;1 

fac = 0.5 * distributions.chi2.ppf(1 - alpha, 1) 

target = boxcox_llf(lmax, x) - fac 

 

def rootfunc(lmbda, data, target): 

return boxcox_llf(lmbda, data) - target 

 

# Find positive endpoint of interval in which answer is to be found 

newlm = lmax + 0.5 

N = 0 

while (rootfunc(newlm, x, target) > 0.0) and (N < 500): 

newlm += 0.1 

N += 1 

 

if N == 500: 

raise RuntimeError("Could not find endpoint.") 

 

lmplus = optimize.brentq(rootfunc, lmax, newlm, args=(x, target)) 

 

# Now find negative interval in the same way 

newlm = lmax - 0.5 

N = 0 

while (rootfunc(newlm, x, target) > 0.0) and (N < 500): 

newlm -= 0.1 

N += 1 

 

if N == 500: 

raise RuntimeError("Could not find endpoint.") 

 

lmminus = optimize.brentq(rootfunc, newlm, lmax, args=(x, target)) 

return lmminus, lmplus 

 

 

def boxcox(x, lmbda=None, alpha=None): 

r""" 

Return a positive dataset transformed by a Box-Cox power transformation. 

 

Parameters 

---------- 

x : ndarray 

Input array. Should be 1-dimensional. 

lmbda : {None, scalar}, optional 

If `lmbda` is not None, do the transformation for that value. 

 

If `lmbda` is None, find the lambda that maximizes the log-likelihood 

function and return it as the second output argument. 

alpha : {None, float}, optional 

If ``alpha`` is not None, return the ``100 * (1-alpha)%`` confidence 

interval for `lmbda` as the third output argument. 

Must be between 0.0 and 1.0. 

 

Returns 

------- 

boxcox : ndarray 

Box-Cox power transformed array. 

maxlog : float, optional 

If the `lmbda` parameter is None, the second returned argument is 

the lambda that maximizes the log-likelihood function. 

(min_ci, max_ci) : tuple of float, optional 

If `lmbda` parameter is None and ``alpha`` is not None, this returned 

tuple of floats represents the minimum and maximum confidence limits 

given ``alpha``. 

 

See Also 

-------- 

probplot, boxcox_normplot, boxcox_normmax, boxcox_llf 

 

Notes 

----- 

The Box-Cox transform is given by:: 

 

y = (x**lmbda - 1) / lmbda, for lmbda != 0

log(x), for lmbda = 0 

 

`boxcox` requires the input data to be positive. Sometimes a Box-Cox 

transformation provides a shift parameter to achieve this; `boxcox` does 

not. Such a shift parameter is equivalent to adding a positive constant to 

`x` before calling `boxcox`. 

 

The confidence limits returned when ``alpha`` is provided give the interval 

where: 

 

.. math:: 

 

llf(\hat{\lambda}) - llf(\lambda) < \frac{1}{2}\chi^2(1 - \alpha, 1), 

 

with ``llf`` the log-likelihood function and :math:`\chi^2` the chi-squared 

function. 

 

References 

---------- 

G.E.P. Box and D.R. Cox, "An Analysis of Transformations", Journal of the 

Royal Statistical Society B, 26, 211-252 (1964). 

 

Examples 

-------- 

>>> from scipy import stats 

>>> import matplotlib.pyplot as plt 

 

We generate some random variates from a non-normal distribution and make a 

probability plot for it, to show it is non-normal in the tails: 

 

>>> fig = plt.figure() 

>>> ax1 = fig.add_subplot(211) 

>>> x = stats.loggamma.rvs(5, size=500) + 5 

>>> prob = stats.probplot(x, dist=stats.norm, plot=ax1) 

>>> ax1.set_xlabel('') 

>>> ax1.set_title('Probplot against normal distribution') 

 

We now use `boxcox` to transform the data so it's closest to normal: 

 

>>> ax2 = fig.add_subplot(212) 

>>> xt, _ = stats.boxcox(x) 

>>> prob = stats.probplot(xt, dist=stats.norm, plot=ax2) 

>>> ax2.set_title('Probplot after Box-Cox transformation') 

 

>>> plt.show() 

 

""" 

x = np.asarray(x) 

if x.size == 0: 

return x 

 

if any(x <= 0): 

raise ValueError("Data must be positive.") 

 

if lmbda is not None: # single transformation 

return special.boxcox(x, lmbda) 

 

# If lmbda=None, find the lmbda that maximizes the log-likelihood function. 

lmax = boxcox_normmax(x, method='mle') 

y = boxcox(x, lmax) 

 

if alpha is None: 

return y, lmax 

else: 

# Find confidence interval 

interval = _boxcox_conf_interval(x, lmax, alpha) 

return y, lmax, interval 
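
# Small sketch (hypothetical helper) confirming the transform definition
# from the Notes: for fixed ``lmbda`` the result is
# (x**lmbda - 1) / lmbda, and for lmbda = 0 it is log(x).
def _demo_boxcox_definition():
    x = np.array([0.5, 1.0, 2.0, 4.0])
    assert np.allclose(boxcox(x, lmbda=2.0), (x ** 2.0 - 1) / 2.0)
    assert np.allclose(boxcox(x, lmbda=0.0), np.log(x))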

 

 

def boxcox_normmax(x, brack=(-2.0, 2.0), method='pearsonr'): 

"""Compute optimal Box-Cox transform parameter for input data. 

 

Parameters 

---------- 

x : array_like 

Input array. 

brack : 2-tuple, optional 

The starting interval for a downhill bracket search with 

`optimize.brent`. Note that this is in most cases not critical; the 

final result is allowed to be outside this bracket. 

method : str, optional 

The method to determine the optimal transform parameter (`boxcox` 

``lmbda`` parameter). Options are: 

 

'pearsonr' (default) 

Maximizes the Pearson correlation coefficient between 

``y = boxcox(x)`` and the expected values for ``y`` if `x` would be 

normally-distributed. 

 

'mle' 

Maximizes the log-likelihood `boxcox_llf`. This is the method used

in `boxcox`. 

 

'all' 

Use all optimization methods available, and return all results. 

Useful to compare different methods. 

 

Returns 

------- 

maxlog : float or ndarray 

The optimal transform parameter found. An array instead of a scalar 

for ``method='all'``. 

 

See Also 

-------- 

boxcox, boxcox_llf, boxcox_normplot 

 

Examples 

-------- 

>>> from scipy import stats 

>>> import matplotlib.pyplot as plt 

>>> np.random.seed(1234) # make this example reproducible 

 

Generate some data and determine optimal ``lmbda`` in various ways: 

 

>>> x = stats.loggamma.rvs(5, size=30) + 5 

>>> y, lmax_mle = stats.boxcox(x) 

>>> lmax_pearsonr = stats.boxcox_normmax(x) 

 

>>> lmax_mle 

7.177... 

>>> lmax_pearsonr 

7.916... 

>>> stats.boxcox_normmax(x, method='all') 

array([ 7.91667384, 7.17718692]) 

 

>>> fig = plt.figure() 

>>> ax = fig.add_subplot(111) 

>>> prob = stats.boxcox_normplot(x, -10, 10, plot=ax) 

>>> ax.axvline(lmax_mle, color='r') 

>>> ax.axvline(lmax_pearsonr, color='g', ls='--') 

 

>>> plt.show() 

 

""" 

 

def _pearsonr(x, brack): 

osm_uniform = _calc_uniform_order_statistic_medians(len(x)) 

xvals = distributions.norm.ppf(osm_uniform) 

 

def _eval_pearsonr(lmbda, xvals, samps): 

# This function computes the x-axis values of the probability plot 

# and computes a linear regression (including the correlation) and 

# returns ``1 - r`` so that a minimization function maximizes the 

# correlation. 

y = boxcox(samps, lmbda) 

yvals = np.sort(y) 

r, prob = stats.pearsonr(xvals, yvals) 

return 1 - r 

 

return optimize.brent(_eval_pearsonr, brack=brack, args=(xvals, x)) 

 

def _mle(x, brack): 

def _eval_mle(lmb, data): 

# function to minimize 

return -boxcox_llf(lmb, data) 

 

return optimize.brent(_eval_mle, brack=brack, args=(x,)) 

 

def _all(x, brack): 

maxlog = np.zeros(2, dtype=float) 

maxlog[0] = _pearsonr(x, brack) 

maxlog[1] = _mle(x, brack) 

return maxlog 

 

methods = {'pearsonr': _pearsonr, 

'mle': _mle, 

'all': _all} 

if method not in methods:

raise ValueError("Method %s not recognized." % method) 

 

optimfunc = methods[method] 

return optimfunc(x, brack) 
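
# Sketch (hypothetical helper): ``boxcox`` with ``lmbda=None`` calls
# this function with ``method='mle'``, so the two entry points should
# agree on the optimal lambda.
def _demo_boxcox_normmax_mle_agreement():
    rng = np.random.RandomState(1234)
    x = rng.gamma(shape=5.0, scale=1.0, size=100) + 5
    _, lmax = boxcox(x)
    assert np.allclose(lmax, boxcox_normmax(x, method='mle'))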

 

 

def boxcox_normplot(x, la, lb, plot=None, N=80): 

"""Compute parameters for a Box-Cox normality plot, optionally show it. 

 

A Box-Cox normality plot shows graphically what the best transformation 

parameter is to use in `boxcox` to obtain a distribution that is close 

to normal. 

 

Parameters 

---------- 

x : array_like 

Input array. 

la, lb : scalar 

The lower and upper bounds for the ``lmbda`` values to pass to `boxcox` 

for Box-Cox transformations. These are also the limits of the 

horizontal axis of the plot if that is generated. 

plot : object, optional 

If given, plots the quantiles and least squares fit. 

`plot` is an object that has to have methods "plot" and "text". 

The `matplotlib.pyplot` module or a Matplotlib Axes object can be used, 

or a custom object with the same methods. 

Default is None, which means that no plot is created. 

N : int, optional 

Number of points on the horizontal axis (equally distributed from 

`la` to `lb`). 

 

Returns 

------- 

lmbdas : ndarray 

The ``lmbda`` values for which a Box-Cox transform was done. 

ppcc : ndarray 

Probability Plot Correlation Coefficient, as obtained from `probplot`

when fitting the Box-Cox transformed input `x` against a normal 

distribution. 

 

See Also 

-------- 

probplot, boxcox, boxcox_normmax, boxcox_llf, ppcc_max 

 

Notes 

----- 

Even if `plot` is given, the figure is not shown or saved by 

`boxcox_normplot`; ``plt.show()`` or ``plt.savefig('figname.png')`` 

should be used after calling `boxcox_normplot`.

 

Examples 

-------- 

>>> from scipy import stats 

>>> import matplotlib.pyplot as plt 

 

Generate some non-normally distributed data, and create a Box-Cox plot: 

 

>>> x = stats.loggamma.rvs(5, size=500) + 5 

>>> fig = plt.figure() 

>>> ax = fig.add_subplot(111) 

>>> prob = stats.boxcox_normplot(x, -20, 20, plot=ax) 

 

Determine and plot the optimal ``lmbda`` to transform ``x`` and plot it in 

the same plot: 

 

>>> _, maxlog = stats.boxcox(x) 

>>> ax.axvline(maxlog, color='r') 

 

>>> plt.show() 

 

""" 

x = np.asarray(x) 

if x.size == 0: 

return x 

 

if lb <= la: 

raise ValueError("`lb` has to be larger than `la`.") 

 

lmbdas = np.linspace(la, lb, num=N) 

ppcc = np.zeros_like(lmbdas)

for i, val in enumerate(lmbdas): 

# Determine for each lmbda the correlation coefficient of transformed x 

z = boxcox(x, lmbda=val) 

_, r2 = probplot(z, dist='norm', fit=True) 

ppcc[i] = r2[-1] 

 

if plot is not None: 

plot.plot(lmbdas, ppcc, 'x') 

_add_axis_labels_title(plot, xlabel='$\\lambda$', 

ylabel='Prob Plot Corr. Coef.', 

title='Box-Cox Normality Plot') 

 

return lmbdas, ppcc 

 

 

def shapiro(x): 

""" 

Perform the Shapiro-Wilk test for normality. 

 

The Shapiro-Wilk test tests the null hypothesis that the 

data was drawn from a normal distribution. 

 

Parameters 

---------- 

x : array_like 

Array of sample data. 

 

Returns 

------- 

W : float 

The test statistic. 

p-value : float 

The p-value for the hypothesis test. 

 

See Also 

-------- 

anderson : The Anderson-Darling test for normality 

kstest : The Kolmogorov-Smirnov test for goodness of fit. 

 

Notes 

----- 

The algorithm used is described in [4]_ but censoring parameters as 

described are not implemented. For N > 5000 the W test statistic is accurate 

but the p-value may not be. 

 

The chance of rejecting the null hypothesis when it is true is close to 5% 

regardless of sample size. 

 

References 

---------- 

.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm 

.. [2] Shapiro, S. S. & Wilk, M. B. (1965). An analysis of variance test for

normality (complete samples), Biometrika, Vol. 52, pp. 591-611. 

.. [3] Razali, N. M. & Wah, Y. B. (2011) Power comparisons of Shapiro-Wilk, 

Kolmogorov-Smirnov, Lilliefors and Anderson-Darling tests, Journal of 

Statistical Modeling and Analytics, Vol. 2, pp. 21-33. 

.. [4] ALGORITHM AS R94 APPL. STATIST. (1995) VOL. 44, NO. 4. 

 

Examples 

-------- 

>>> from scipy import stats 

>>> np.random.seed(12345678) 

>>> x = stats.norm.rvs(loc=5, scale=3, size=100) 

>>> stats.shapiro(x) 

(0.9772805571556091, 0.08144091814756393) 

 

""" 

x = np.ravel(x) 

 

N = len(x) 

if N < 3: 

raise ValueError("Data must be at least length 3.") 

 

a = zeros(N, 'f') 

init = 0 

 

y = sort(x) 

a, w, pw, ifault = statlib.swilk(y, a[:N//2], init) 

if ifault not in [0, 2]: 

warnings.warn("Input data for shapiro has range zero. The results " 

"may not be accurate.") 

if N > 5000: 

warnings.warn("p-value may not be accurate for N > 5000.") 

 

return w, pw 
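
# Rough Monte Carlo sketch (hypothetical helper; trial counts are
# arbitrary) of the docstring claim that the test rejects a true null
# about 5% of the time at the 0.05 level.
def _demo_shapiro_null_rejection_rate():
    rng = np.random.RandomState(12345678)
    n_trials = 200
    rejections = 0
    for _ in range(n_trials):
        sample = rng.normal(size=50)
        _, pval = shapiro(sample)
        if pval < 0.05:
            rejections += 1
    # Expect roughly 0.05, up to Monte Carlo noise.
    print("rejection rate: %.3f" % (rejections / float(n_trials)))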

 

 

# Values from Stephens, M A, "EDF Statistics for Goodness of Fit and 

# Some Comparisons", Journal of the American Statistical

# Association, Vol. 69, Issue 347, Sept. 1974, pp 730-737 

_Avals_norm = array([0.576, 0.656, 0.787, 0.918, 1.092]) 

_Avals_expon = array([0.922, 1.078, 1.341, 1.606, 1.957]) 

# From Stephens, M A, "Goodness of Fit for the Extreme Value Distribution", 

# Biometrika, Vol. 64, Issue 3, Dec. 1977, pp 583-588. 

_Avals_gumbel = array([0.474, 0.637, 0.757, 0.877, 1.038]) 

# From Stephens, M A, "Tests of Fit for the Logistic Distribution Based 

# on the Empirical Distribution Function.", Biometrika, 

# Vol. 66, Issue 3, Dec. 1979, pp 591-595. 

_Avals_logistic = array([0.426, 0.563, 0.660, 0.769, 0.906, 1.010]) 

 

 

AndersonResult = namedtuple('AndersonResult', ('statistic', 

'critical_values', 

'significance_level')) 

 

 

def anderson(x, dist='norm'): 

""" 

Anderson-Darling test for data coming from a particular distribution 

 

The Anderson-Darling test tests the null hypothesis that a sample is

drawn from a population that follows a particular distribution. 

For the Anderson-Darling test, the critical values depend on 

which distribution is being tested against. This function works 

for normal, exponential, logistic, or Gumbel (Extreme Value 

Type I) distributions. 

 

Parameters 

---------- 

x : array_like 

array of sample data 

    dist : {'norm', 'expon', 'logistic', 'gumbel', 'gumbel_l', 'gumbel_r',

'extreme1'}, optional 

the type of distribution to test against. The default is 'norm' 

and 'extreme1', 'gumbel_l' and 'gumbel' are synonyms. 

 

Returns 

------- 

statistic : float 

The Anderson-Darling test statistic 

critical_values : list 

The critical values for this distribution 

significance_level : list 

The significance levels for the corresponding critical values 

in percents. The function returns critical values for a 

differing set of significance levels depending on the 

distribution that is being tested against. 

 

See Also 

-------- 

kstest : The Kolmogorov-Smirnov test for goodness-of-fit. 

 

Notes 

----- 

Critical values provided are for the following significance levels: 

 

    normal/exponential

15%, 10%, 5%, 2.5%, 1% 

logistic 

25%, 10%, 5%, 2.5%, 1%, 0.5% 

Gumbel 

25%, 10%, 5%, 2.5%, 1% 

 

If the returned statistic is larger than these critical values then 

for the corresponding significance level, the null hypothesis that 

the data come from the chosen distribution can be rejected. 

The returned statistic is referred to as 'A2' in the references. 

 

References 

---------- 

.. [1] http://www.itl.nist.gov/div898/handbook/prc/section2/prc213.htm 

.. [2] Stephens, M. A. (1974). EDF Statistics for Goodness of Fit and 

Some Comparisons, Journal of the American Statistical Association, 

Vol. 69, pp. 730-737. 

.. [3] Stephens, M. A. (1976). Asymptotic Results for Goodness-of-Fit 

Statistics with Unknown Parameters, Annals of Statistics, Vol. 4, 

pp. 357-369. 

.. [4] Stephens, M. A. (1977). Goodness of Fit for the Extreme Value 

Distribution, Biometrika, Vol. 64, pp. 583-588. 

.. [5] Stephens, M. A. (1977). Goodness of Fit with Special Reference 

           to Tests for Exponentiality, Technical Report No. 262,

Department of Statistics, Stanford University, Stanford, CA. 

.. [6] Stephens, M. A. (1979). Tests of Fit for the Logistic Distribution 

Based on the Empirical Distribution Function, Biometrika, Vol. 66, 

pp. 591-595. 

 
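    Examples
    --------
    A minimal usage sketch; the data and seed are arbitrary choices for
    illustration, so the expected outputs are omitted:

    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x = stats.norm.rvs(size=100)
    >>> result = stats.anderson(x, dist='norm')
    >>> # Reject normality at a given level only if result.statistic
    >>> # exceeds the critical value paired with that significance level.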

""" 

if dist not in ['norm', 'expon', 'gumbel', 'gumbel_l', 

'gumbel_r', 'extreme1', 'logistic']: 

raise ValueError("Invalid distribution; dist must be 'norm', " 

"'expon', 'gumbel', 'extreme1' or 'logistic'.") 

y = sort(x) 

xbar = np.mean(x, axis=0) 

N = len(y) 

if dist == 'norm': 

s = np.std(x, ddof=1, axis=0) 

w = (y - xbar) / s 

logcdf = distributions.norm.logcdf(w) 

logsf = distributions.norm.logsf(w) 

sig = array([15, 10, 5, 2.5, 1]) 

critical = around(_Avals_norm / (1.0 + 4.0/N - 25.0/N/N), 3) 

elif dist == 'expon': 

w = y / xbar 

logcdf = distributions.expon.logcdf(w) 

logsf = distributions.expon.logsf(w) 

sig = array([15, 10, 5, 2.5, 1]) 

critical = around(_Avals_expon / (1.0 + 0.6/N), 3) 

elif dist == 'logistic': 

def rootfunc(ab, xj, N): 

a, b = ab 

tmp = (xj - a) / b 

tmp2 = exp(tmp) 

val = [np.sum(1.0/(1+tmp2), axis=0) - 0.5*N, 

np.sum(tmp*(1.0-tmp2)/(1+tmp2), axis=0) + N] 

return array(val) 

 

sol0 = array([xbar, np.std(x, ddof=1, axis=0)]) 

sol = optimize.fsolve(rootfunc, sol0, args=(x, N), xtol=1e-5) 

w = (y - sol[0]) / sol[1] 

logcdf = distributions.logistic.logcdf(w) 

logsf = distributions.logistic.logsf(w) 

sig = array([25, 10, 5, 2.5, 1, 0.5]) 

critical = around(_Avals_logistic / (1.0 + 0.25/N), 3) 

elif dist == 'gumbel_r': 

xbar, s = distributions.gumbel_r.fit(x) 

w = (y - xbar) / s 

logcdf = distributions.gumbel_r.logcdf(w) 

logsf = distributions.gumbel_r.logsf(w) 

sig = array([25, 10, 5, 2.5, 1]) 

critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) 

else: # (dist == 'gumbel') or (dist == 'gumbel_l') or (dist == 'extreme1') 

xbar, s = distributions.gumbel_l.fit(x) 

w = (y - xbar) / s 

logcdf = distributions.gumbel_l.logcdf(w) 

logsf = distributions.gumbel_l.logsf(w) 

sig = array([25, 10, 5, 2.5, 1]) 

critical = around(_Avals_gumbel / (1.0 + 0.2/sqrt(N)), 3) 

 

i = arange(1, N + 1) 

A2 = -N - np.sum((2*i - 1.0) / N * (logcdf + logsf[::-1]), axis=0) 

 

return AndersonResult(A2, critical, sig) 

 

 

def _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N): 

""" 

Compute A2akN equation 7 of Scholz and Stephens. 

 

Parameters 

---------- 

samples : sequence of 1-D array_like 

Array of sample arrays. 

Z : array_like 

Sorted array of all observations. 

Zstar : array_like 

Sorted array of unique observations. 

k : int 

Number of samples. 

n : array_like 

Number of observations in each sample. 

N : int 

Total number of observations. 

 

Returns 

------- 

A2aKN : float 

        The A2aKN statistic of Scholz and Stephens 1987.

""" 

 

A2akN = 0. 

Z_ssorted_left = Z.searchsorted(Zstar, 'left') 

if N == Zstar.size: 

lj = 1. 

else: 

lj = Z.searchsorted(Zstar, 'right') - Z_ssorted_left 

Bj = Z_ssorted_left + lj / 2. 

for i in arange(0, k): 

s = np.sort(samples[i]) 

s_ssorted_right = s.searchsorted(Zstar, side='right') 

Mij = s_ssorted_right.astype(float) 

fij = s_ssorted_right - s.searchsorted(Zstar, 'left') 

Mij -= fij / 2. 

inner = lj / float(N) * (N*Mij - Bj*n[i])**2 / (Bj*(N - Bj) - N*lj/4.) 

A2akN += inner.sum() / n[i] 

A2akN *= (N - 1.) / N 

return A2akN 

 

 

def _anderson_ksamp_right(samples, Z, Zstar, k, n, N): 

""" 

Compute A2akN equation 6 of Scholz & Stephens. 

 

Parameters 

---------- 

samples : sequence of 1-D array_like 

Array of sample arrays. 

Z : array_like 

Sorted array of all observations. 

Zstar : array_like 

Sorted array of unique observations. 

k : int 

Number of samples. 

n : array_like 

Number of observations in each sample. 

N : int 

Total number of observations. 

 

Returns 

------- 

A2KN : float 

        The A2KN statistic of Scholz and Stephens 1987.

""" 

 

A2kN = 0. 

lj = Z.searchsorted(Zstar[:-1], 'right') - Z.searchsorted(Zstar[:-1], 

'left') 

Bj = lj.cumsum() 

for i in arange(0, k): 

s = np.sort(samples[i]) 

Mij = s.searchsorted(Zstar[:-1], side='right') 

inner = lj / float(N) * (N * Mij - Bj * n[i])**2 / (Bj * (N - Bj)) 

A2kN += inner.sum() / n[i] 

return A2kN 

 

 

Anderson_ksampResult = namedtuple('Anderson_ksampResult', 

('statistic', 'critical_values', 

'significance_level')) 

 

 

def anderson_ksamp(samples, midrank=True): 

"""The Anderson-Darling test for k-samples. 

 

The k-sample Anderson-Darling test is a modification of the 

one-sample Anderson-Darling test. It tests the null hypothesis 

    that k samples are drawn from the same population without having

to specify the distribution function of that population. The 

critical values depend on the number of samples. 

 

Parameters 

---------- 

samples : sequence of 1-D array_like 

Array of sample data in arrays. 

midrank : bool, optional 

Type of Anderson-Darling test which is computed. Default 

(True) is the midrank test applicable to continuous and 

discrete populations. If False, the right side empirical 

distribution is used. 

 

Returns 

------- 

statistic : float 

Normalized k-sample Anderson-Darling test statistic. 

critical_values : array 

The critical values for significance levels 25%, 10%, 5%, 2.5%, 1%. 

significance_level : float 

An approximate significance level at which the null hypothesis for the 

provided samples can be rejected. 

 

Raises 

------ 

ValueError 

If less than 2 samples are provided, a sample is empty, or no 

distinct observations are in the samples. 

 

See Also 

-------- 

ks_2samp : 2 sample Kolmogorov-Smirnov test 

anderson : 1 sample Anderson-Darling test 

 

Notes 

----- 

    [1]_ defines three versions of the k-sample Anderson-Darling test:

one for continuous distributions and two for discrete 

distributions, in which ties between samples may occur. The 

default of this routine is to compute the version based on the 

midrank empirical distribution function. This test is applicable 

to continuous and discrete data. If midrank is set to False, the 

right side empirical distribution is used for a test for discrete 

data. According to [1]_, the two discrete test statistics differ 

only slightly if a few collisions due to round-off errors occur in 

the test not adjusted for ties between samples. 

 

.. versionadded:: 0.14.0 

 

References 

---------- 

.. [1] Scholz, F. W and Stephens, M. A. (1987), K-Sample 

Anderson-Darling Tests, Journal of the American Statistical 

Association, Vol. 82, pp. 918-924. 

 

Examples 

-------- 

>>> from scipy import stats 

>>> np.random.seed(314159) 

 

The null hypothesis that the two random samples come from the same 

distribution can be rejected at the 5% level because the returned 

test value is greater than the critical value for 5% (1.961) but 

not at the 2.5% level. The interpolation gives an approximate 

significance level of 3.1%: 

 

>>> stats.anderson_ksamp([np.random.normal(size=50), 

... np.random.normal(loc=0.5, size=30)]) 

(2.4615796189876105, 

array([ 0.325, 1.226, 1.961, 2.718, 3.752]), 

0.03134990135800783) 

 

 

The null hypothesis cannot be rejected for three samples from an 

identical distribution. The approximate p-value (87%) has to be 

computed by extrapolation and may not be very accurate: 

 

>>> stats.anderson_ksamp([np.random.normal(size=50), 

... np.random.normal(size=30), np.random.normal(size=20)]) 

(-0.73091722665244196, 

array([ 0.44925884, 1.3052767 , 1.9434184 , 2.57696569, 3.41634856]), 

0.8789283903979661) 

 

""" 

k = len(samples) 

if (k < 2): 

raise ValueError("anderson_ksamp needs at least two samples") 

 

samples = list(map(np.asarray, samples)) 

Z = np.sort(np.hstack(samples)) 

N = Z.size 

Zstar = np.unique(Z) 

if Zstar.size < 2: 

raise ValueError("anderson_ksamp needs more than one distinct " 

"observation") 

 

n = np.array([sample.size for sample in samples]) 

if any(n == 0): 

raise ValueError("anderson_ksamp encountered sample without " 

"observations") 

 

if midrank: 

A2kN = _anderson_ksamp_midrank(samples, Z, Zstar, k, n, N) 

else: 

A2kN = _anderson_ksamp_right(samples, Z, Zstar, k, n, N) 

 

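    # H, h and g are the auxiliary quantities of Scholz and Stephens (1987)
    # that enter the variance (sigmasq) of the statistic below:
    # H = sum(1/n_i), h = sum_{j=1}^{N-1} 1/j, and g is their double-sum term.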
H = (1. / n).sum() 

hs_cs = (1. / arange(N - 1, 1, -1)).cumsum() 

h = hs_cs[-1] + 1 

g = (hs_cs / arange(2, N)).sum() 

 

a = (4*g - 6) * (k - 1) + (10 - 6*g)*H 

b = (2*g - 4)*k**2 + 8*h*k + (2*g - 14*h - 4)*H - 8*h + 4*g - 6 

c = (6*h + 2*g - 2)*k**2 + (4*h - 4*g + 6)*k + (2*h - 6)*H + 4*h 

d = (2*h + 6)*k**2 - 4*h*k 

sigmasq = (a*N**3 + b*N**2 + c*N + d) / ((N - 1.) * (N - 2.) * (N - 3.)) 

m = k - 1 

A2 = (A2kN - m) / math.sqrt(sigmasq) 

 

# The b_i values are the interpolation coefficients from Table 2 

# of Scholz and Stephens 1987 

b0 = np.array([0.675, 1.281, 1.645, 1.96, 2.326]) 

b1 = np.array([-0.245, 0.25, 0.678, 1.149, 1.822]) 

b2 = np.array([-0.105, -0.305, -0.362, -0.391, -0.396]) 

critical = b0 + b1 / math.sqrt(m) + b2 / m 

pf = np.polyfit(critical, log(np.array([0.25, 0.1, 0.05, 0.025, 0.01])), 2) 

if A2 < critical.min() or A2 > critical.max(): 

warnings.warn("approximate p-value will be computed by extrapolation") 

try: 

p = math.exp(np.polyval(pf, A2)) 

    except OverflowError:

p = float("inf") 

return Anderson_ksampResult(A2, critical, p) 

 

 

AnsariResult = namedtuple('AnsariResult', ('statistic', 'pvalue')) 

 

 

def ansari(x, y): 

""" 

Perform the Ansari-Bradley test for equal scale parameters 

 

The Ansari-Bradley test is a non-parametric test for the equality 

of the scale parameter of the distributions from which two 

samples were drawn. 

 

Parameters 

---------- 

x, y : array_like 

arrays of sample data 

 

Returns 

------- 

statistic : float 

The Ansari-Bradley test statistic 

pvalue : float 

The p-value of the hypothesis test 

 

See Also 

-------- 

fligner : A non-parametric test for the equality of k variances 

mood : A non-parametric test for the equality of two scale parameters 

 

Notes 

----- 

The p-value given is exact when the sample sizes are both less than 

55 and there are no ties, otherwise a normal approximation for the 

p-value is used. 

 

References 

---------- 

.. [1] Sprent, Peter and N.C. Smeeton. Applied nonparametric statistical 

methods. 3rd ed. Chapman and Hall/CRC. 2001. Section 5.8.2. 

 
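    Examples
    --------
    A minimal usage sketch; the samples and seed are arbitrary choices for
    illustration, so the expected outputs are omitted:

    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> x = stats.norm.rvs(scale=1.0, size=40)
    >>> y = stats.norm.rvs(scale=3.0, size=40)
    >>> statistic, pvalue = stats.ansari(x, y)
    >>> # A small pvalue is evidence that the scale parameters differ.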

""" 

x, y = asarray(x), asarray(y) 

n = len(x) 

m = len(y) 

if m < 1: 

raise ValueError("Not enough other observations.") 

if n < 1: 

raise ValueError("Not enough test observations.") 

 

N = m + n 

xy = r_[x, y] # combine 

rank = stats.rankdata(xy) 

symrank = amin(array((rank, N - rank + 1)), 0) 

AB = np.sum(symrank[:n], axis=0) 

uxy = unique(xy) 

repeats = (len(uxy) != len(xy)) 

exact = ((m < 55) and (n < 55) and not repeats) 

if repeats and (m < 55 or n < 55): 

warnings.warn("Ties preclude use of exact statistic.") 

if exact: 

astart, a1, ifault = statlib.gscale(n, m) 

ind = AB - astart 

total = np.sum(a1, axis=0) 

if ind < len(a1)/2.0: 

cind = int(ceil(ind)) 

if ind == cind: 

pval = 2.0 * np.sum(a1[:cind+1], axis=0) / total 

else: 

pval = 2.0 * np.sum(a1[:cind], axis=0) / total 

else: 

find = int(floor(ind)) 

if ind == floor(ind): 

pval = 2.0 * np.sum(a1[find:], axis=0) / total 

else: 

pval = 2.0 * np.sum(a1[find+1:], axis=0) / total 

return AnsariResult(AB, min(1.0, pval)) 

 

# otherwise compute normal approximation 

if N % 2: # N odd 

mnAB = n * (N+1.0)**2 / 4.0 / N 

varAB = n * m * (N+1.0) * (3+N**2) / (48.0 * N**2) 

else: 

mnAB = n * (N+2.0) / 4.0 

varAB = m * n * (N+2) * (N-2.0) / 48 / (N-1.0) 

if repeats: # adjust variance estimates 

# compute np.sum(tj * rj**2,axis=0) 

fac = np.sum(symrank**2, axis=0) 

if N % 2: # N odd 

varAB = m * n * (16*N*fac - (N+1)**4) / (16.0 * N**2 * (N-1)) 

else: # N even 

varAB = m * n * (16*fac - N*(N+2)**2) / (16.0 * N * (N-1)) 

 

z = (AB - mnAB) / sqrt(varAB) 

pval = distributions.norm.sf(abs(z)) * 2.0 

return AnsariResult(AB, pval) 

 

 

BartlettResult = namedtuple('BartlettResult', ('statistic', 'pvalue')) 

 

 

def bartlett(*args): 

""" 

Perform Bartlett's test for equal variances 

 

Bartlett's test tests the null hypothesis that all input samples 

are from populations with equal variances. For samples 

from significantly non-normal populations, Levene's test 

`levene` is more robust. 

 

Parameters 

---------- 

sample1, sample2,... : array_like 

arrays of sample data. May be different lengths. 

 

Returns 

------- 

statistic : float 

The test statistic. 

pvalue : float 

The p-value of the test. 

 

See Also 

-------- 

fligner : A non-parametric test for the equality of k variances 

levene : A robust parametric test for equality of k variances 

 

Notes 

----- 

    Conover et al. (1981) examined many of the existing parametric and
    nonparametric tests in extensive simulations and concluded that the
    tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to
    be superior in terms of robustness to departures from normality and
    power [3]_.

 

References 

---------- 

.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda357.htm 

 

.. [2] Snedecor, George W. and Cochran, William G. (1989), Statistical 

Methods, Eighth Edition, Iowa State University Press. 

 

.. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and 

Hypothesis Testing based on Quadratic Inference Function. Technical 

Report #99-03, Center for Likelihood Studies, Pennsylvania State 

University. 

 

.. [4] Bartlett, M. S. (1937). Properties of Sufficiency and Statistical 

Tests. Proceedings of the Royal Society of London. Series A, 

Mathematical and Physical Sciences, Vol. 160, No.901, pp. 268-282. 

 
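    Examples
    --------
    A minimal usage sketch; the samples and seed are arbitrary choices for
    illustration, so the expected outputs are omitted:

    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> a = stats.norm.rvs(scale=1.0, size=50)
    >>> b = stats.norm.rvs(scale=1.5, size=40)
    >>> c = stats.norm.rvs(scale=1.0, size=60)
    >>> statistic, pvalue = stats.bartlett(a, b, c)
    >>> # A small pvalue is evidence against equal variances.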

""" 

# Handle empty input 

for a in args: 

if np.asanyarray(a).size == 0: 

return BartlettResult(np.nan, np.nan) 

 

k = len(args) 

if k < 2: 

raise ValueError("Must enter at least two input sample vectors.") 

Ni = zeros(k) 

ssq = zeros(k, 'd') 

for j in range(k): 

Ni[j] = len(args[j]) 

ssq[j] = np.var(args[j], ddof=1) 

Ntot = np.sum(Ni, axis=0) 

spsq = np.sum((Ni - 1)*ssq, axis=0) / (1.0*(Ntot - k)) 

numer = (Ntot*1.0 - k) * log(spsq) - np.sum((Ni - 1.0)*log(ssq), axis=0) 

denom = 1.0 + 1.0/(3*(k - 1)) * ((np.sum(1.0/(Ni - 1.0), axis=0)) - 

1.0/(Ntot - k)) 

T = numer / denom 

pval = distributions.chi2.sf(T, k - 1) # 1 - cdf 

 

return BartlettResult(T, pval) 

 

 

LeveneResult = namedtuple('LeveneResult', ('statistic', 'pvalue')) 

 

 

def levene(*args, **kwds): 

""" 

Perform Levene test for equal variances. 

 

The Levene test tests the null hypothesis that all input samples 

are from populations with equal variances. Levene's test is an 

alternative to Bartlett's test `bartlett` in the case where 

there are significant deviations from normality. 

 

Parameters 

---------- 

sample1, sample2, ... : array_like 

The sample data, possibly with different lengths 

center : {'mean', 'median', 'trimmed'}, optional 

Which function of the data to use in the test. The default 

is 'median'. 

proportiontocut : float, optional 

When `center` is 'trimmed', this gives the proportion of data points 

to cut from each end. (See `scipy.stats.trim_mean`.) 

Default is 0.05. 

 

Returns 

------- 

statistic : float 

The test statistic. 

pvalue : float 

The p-value for the test. 

 

Notes 

----- 

Three variations of Levene's test are possible. The possibilities 

and their recommended usages are: 

 

    * 'median' : Recommended for skewed (non-normal) distributions.

* 'mean' : Recommended for symmetric, moderate-tailed distributions. 

* 'trimmed' : Recommended for heavy-tailed distributions. 

 

References 

---------- 

.. [1] http://www.itl.nist.gov/div898/handbook/eda/section3/eda35a.htm 

.. [2] Levene, H. (1960). In Contributions to Probability and Statistics: 

Essays in Honor of Harold Hotelling, I. Olkin et al. eds., 

Stanford University Press, pp. 278-292. 

.. [3] Brown, M. B. and Forsythe, A. B. (1974), Journal of the American 

Statistical Association, 69, 364-367 

 
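    Examples
    --------
    A minimal usage sketch; the samples and seed are arbitrary choices for
    illustration, so the expected outputs are omitted. The `center` keyword
    selects one of the three variants described above:

    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> a = stats.norm.rvs(scale=1.0, size=50)
    >>> b = stats.norm.rvs(scale=2.0, size=40)
    >>> statistic, pvalue = stats.levene(a, b)
    >>> statistic, pvalue = stats.levene(a, b, center='trimmed',
    ...                                  proportiontocut=0.05)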

""" 

# Handle keyword arguments. 

center = 'median' 

proportiontocut = 0.05 

for kw, value in kwds.items(): 

if kw not in ['center', 'proportiontocut']: 

raise TypeError("levene() got an unexpected keyword " 

"argument '%s'" % kw) 

if kw == 'center': 

center = value 

else: 

proportiontocut = value 

 

k = len(args) 

if k < 2: 

raise ValueError("Must enter at least two input sample vectors.") 

Ni = zeros(k) 

Yci = zeros(k, 'd') 

 

if center not in ['mean', 'median', 'trimmed']: 

raise ValueError("Keyword argument <center> must be 'mean', 'median'" 

" or 'trimmed'.") 

 

if center == 'median': 

func = lambda x: np.median(x, axis=0) 

elif center == 'mean': 

func = lambda x: np.mean(x, axis=0) 

else: # center == 'trimmed' 

args = tuple(stats.trimboth(np.sort(arg), proportiontocut) 

for arg in args) 

func = lambda x: np.mean(x, axis=0) 

 

for j in range(k): 

Ni[j] = len(args[j]) 

Yci[j] = func(args[j]) 

Ntot = np.sum(Ni, axis=0) 

 

# compute Zij's 

Zij = [None] * k 

for i in range(k): 

Zij[i] = abs(asarray(args[i]) - Yci[i]) 

 

# compute Zbari 

Zbari = zeros(k, 'd') 

Zbar = 0.0 

for i in range(k): 

Zbari[i] = np.mean(Zij[i], axis=0) 

Zbar += Zbari[i] * Ni[i] 

 

Zbar /= Ntot 

numer = (Ntot - k) * np.sum(Ni * (Zbari - Zbar)**2, axis=0) 

 

# compute denom_variance 

dvar = 0.0 

for i in range(k): 

dvar += np.sum((Zij[i] - Zbari[i])**2, axis=0) 

 

denom = (k - 1.0) * dvar 

 

W = numer / denom 

pval = distributions.f.sf(W, k-1, Ntot-k) # 1 - cdf 

return LeveneResult(W, pval) 

 

 

def binom_test(x, n=None, p=0.5, alternative='two-sided'): 

""" 

Perform a test that the probability of success is p. 

 

    This is an exact test of the null hypothesis that the probability of
    success in a Bernoulli experiment is `p`. By default the test is
    two-sided; one-sided alternatives are selected with the `alternative`
    parameter.

 

Parameters 

---------- 

x : integer or array_like 

the number of successes, or if x has length 2, it is the 

number of successes and the number of failures. 

n : integer 

the number of trials. This is ignored if x gives both the 

number of successes and failures 

p : float, optional 

The hypothesized probability of success. 0 <= p <= 1. The 

default value is p = 0.5 

alternative : {'two-sided', 'greater', 'less'}, optional 

Indicates the alternative hypothesis. The default value is 

'two-sided'. 

 

Returns 

------- 

p-value : float 

The p-value of the hypothesis test 

 

References 

---------- 

.. [1] http://en.wikipedia.org/wiki/Binomial_test 

 
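    Examples
    --------
    A minimal usage sketch with arbitrary numbers: 9 successes in 12 trials
    under a hypothesized success probability of 0.5. For this symmetric
    case the exact two-sided p-value works out to 598/4096, about 0.146:

    >>> from scipy import stats
    >>> pval = stats.binom_test(9, n=12, p=0.5)
    >>> pval_one_sided = stats.binom_test(9, n=12, p=0.5,
    ...                                   alternative='greater')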

""" 

x = atleast_1d(x).astype(np.integer) 

if len(x) == 2: 

n = x[1] + x[0] 

x = x[0] 

elif len(x) == 1: 

x = x[0] 

if n is None or n < x: 

raise ValueError("n must be >= x") 

n = np.int_(n) 

else: 

raise ValueError("Incorrect length for x.") 

 

if (p > 1.0) or (p < 0.0): 

raise ValueError("p must be in range [0,1]") 

 

if alternative not in ('two-sided', 'less', 'greater'): 

raise ValueError("alternative not recognized\n" 

"should be 'two-sided', 'less' or 'greater'") 

 

if alternative == 'less': 

pval = distributions.binom.cdf(x, n, p) 

return pval 

 

if alternative == 'greater': 

pval = distributions.binom.sf(x-1, n, p) 

return pval 

 

# if alternative was neither 'less' nor 'greater', then it's 'two-sided' 

d = distributions.binom.pmf(x, n, p) 

rerr = 1 + 1e-7 

if x == p * n: 

# special case as shortcut, would also be handled by `else` below 

pval = 1. 

elif x < p * n: 

i = np.arange(np.ceil(p * n), n+1) 

y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0) 

pval = (distributions.binom.cdf(x, n, p) + 

distributions.binom.sf(n - y, n, p)) 

else: 

i = np.arange(np.floor(p*n) + 1) 

y = np.sum(distributions.binom.pmf(i, n, p) <= d*rerr, axis=0) 

pval = (distributions.binom.cdf(y-1, n, p) + 

distributions.binom.sf(x-1, n, p)) 

 

return min(1.0, pval) 

 

 

def _apply_func(x, g, func): 

# g is list of indices into x 

# separating x into different groups 

# func should be applied over the groups 

g = unique(r_[0, g, len(x)]) 

output = [] 

for k in range(len(g) - 1): 

output.append(func(x[g[k]:g[k+1]])) 

 

return asarray(output) 

 

 

FlignerResult = namedtuple('FlignerResult', ('statistic', 'pvalue')) 

 

 

def fligner(*args, **kwds): 

""" 

Perform Fligner-Killeen test for equality of variance. 

 

Fligner's test tests the null hypothesis that all input samples 

are from populations with equal variances. Fligner-Killeen's test is 

distribution free when populations are identical [2]_. 

 

Parameters 

---------- 

sample1, sample2, ... : array_like 

Arrays of sample data. Need not be the same length. 

center : {'mean', 'median', 'trimmed'}, optional 

Keyword argument controlling which function of the data is used in 

computing the test statistic. The default is 'median'. 

proportiontocut : float, optional 

When `center` is 'trimmed', this gives the proportion of data points 

to cut from each end. (See `scipy.stats.trim_mean`.) 

Default is 0.05. 

 

Returns 

------- 

statistic : float 

The test statistic. 

pvalue : float 

The p-value for the hypothesis test. 

 

See Also 

-------- 

bartlett : A parametric test for equality of k variances in normal samples 

levene : A robust parametric test for equality of k variances 

 

Notes 

----- 

As with Levene's test there are three variants of Fligner's test that 

differ by the measure of central tendency used in the test. See `levene` 

for more information. 

 

    Conover et al. (1981) examined many of the existing parametric and
    nonparametric tests in extensive simulations and concluded that the
    tests proposed by Fligner and Killeen (1976) and Levene (1960) appear to
    be superior in terms of robustness to departures from normality and
    power [3]_.

 

References 

---------- 

    .. [1] Conover, W. J., Johnson, M. E. and Johnson M. M. (1981). A
           comparative study of tests for homogeneity of variances, with
           applications to the outer continental shelf bidding data.
           Technometrics, 23(4), 351-361.

    .. [2] Fligner, M.A. and Killeen, T.J. (1976). Distribution-free two-sample
           tests for scale. 'Journal of the American Statistical Association.'
           71(353), 210-213.

    .. [3] Park, C. and Lindsay, B. G. (1999). Robust Scale Estimation and
           Hypothesis Testing based on Quadratic Inference Function. Technical
           Report #99-03, Center for Likelihood Studies, Pennsylvania State
           University.
           http://cecas.clemson.edu/~cspark/cv/paper/qif/draftqif2.pdf

 
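    Examples
    --------
    A minimal usage sketch; the samples and seed are arbitrary choices for
    illustration, so the expected outputs are omitted:

    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> a = stats.norm.rvs(scale=1.0, size=50)
    >>> b = stats.norm.rvs(scale=2.0, size=40)
    >>> c = stats.norm.rvs(scale=1.0, size=30)
    >>> statistic, pvalue = stats.fligner(a, b, c)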

""" 

# Handle empty input 

for a in args: 

if np.asanyarray(a).size == 0: 

return FlignerResult(np.nan, np.nan) 

 

# Handle keyword arguments. 

center = 'median' 

proportiontocut = 0.05 

for kw, value in kwds.items(): 

if kw not in ['center', 'proportiontocut']: 

raise TypeError("fligner() got an unexpected keyword " 

"argument '%s'" % kw) 

if kw == 'center': 

center = value 

else: 

proportiontocut = value 

 

k = len(args) 

if k < 2: 

raise ValueError("Must enter at least two input sample vectors.") 

 

if center not in ['mean', 'median', 'trimmed']: 

raise ValueError("Keyword argument <center> must be 'mean', 'median'" 

" or 'trimmed'.") 

 

if center == 'median': 

func = lambda x: np.median(x, axis=0) 

elif center == 'mean': 

func = lambda x: np.mean(x, axis=0) 

else: # center == 'trimmed' 

args = tuple(stats.trimboth(arg, proportiontocut) for arg in args) 

func = lambda x: np.mean(x, axis=0) 

 

Ni = asarray([len(args[j]) for j in range(k)]) 

Yci = asarray([func(args[j]) for j in range(k)]) 

Ntot = np.sum(Ni, axis=0) 

# compute Zij's 

Zij = [abs(asarray(args[i]) - Yci[i]) for i in range(k)] 

allZij = [] 

g = [0] 

for i in range(k): 

allZij.extend(list(Zij[i])) 

g.append(len(allZij)) 

 

ranks = stats.rankdata(allZij) 

a = distributions.norm.ppf(ranks / (2*(Ntot + 1.0)) + 0.5) 

 

# compute Aibar 

Aibar = _apply_func(a, g, np.sum) / Ni 

anbar = np.mean(a, axis=0) 

varsq = np.var(a, axis=0, ddof=1) 

Xsq = np.sum(Ni * (asarray(Aibar) - anbar)**2.0, axis=0) / varsq 

pval = distributions.chi2.sf(Xsq, k - 1) # 1 - cdf 

return FlignerResult(Xsq, pval) 

 

 

def mood(x, y, axis=0): 

""" 

Perform Mood's test for equal scale parameters. 

 

Mood's two-sample test for scale parameters is a non-parametric 

test for the null hypothesis that two samples are drawn from the 

same distribution with the same scale parameter. 

 

Parameters 

---------- 

x, y : array_like 

Arrays of sample data. 

axis : int, optional 

The axis along which the samples are tested. `x` and `y` can be of 

different length along `axis`. 

If `axis` is None, `x` and `y` are flattened and the test is done on 

all values in the flattened arrays. 

 

Returns 

------- 

z : scalar or ndarray 

The z-score for the hypothesis test. For 1-D inputs a scalar is 

returned. 

    p-value : scalar or ndarray

The p-value for the hypothesis test. 

 

See Also 

-------- 

fligner : A non-parametric test for the equality of k variances 

ansari : A non-parametric test for the equality of 2 variances 

bartlett : A parametric test for equality of k variances in normal samples 

levene : A parametric test for equality of k variances 

 

Notes 

----- 

The data are assumed to be drawn from probability distributions ``f(x)`` 

and ``f(x/s) / s`` respectively, for some probability density function f. 

The null hypothesis is that ``s == 1``. 

 

For multi-dimensional arrays, if the inputs are of shapes 

``(n0, n1, n2, n3)`` and ``(n0, m1, n2, n3)``, then if ``axis=1``, the 

resulting z and p values will have shape ``(n0, n2, n3)``. Note that 

``n1`` and ``m1`` don't have to be equal, but the other dimensions do. 

 

Examples 

-------- 

>>> from scipy import stats 

>>> np.random.seed(1234) 

>>> x2 = np.random.randn(2, 45, 6, 7) 

>>> x1 = np.random.randn(2, 30, 6, 7) 

>>> z, p = stats.mood(x1, x2, axis=1) 

>>> p.shape 

(2, 6, 7) 

 

Find the number of points where the difference in scale is not significant: 

 

>>> (p > 0.1).sum() 

74 

 

Perform the test with different scales: 

 

>>> x1 = np.random.randn(2, 30) 

>>> x2 = np.random.randn(2, 35) * 10.0 

>>> stats.mood(x1, x2, axis=1) 

(array([-5.7178125 , -5.25342163]), array([ 1.07904114e-08, 1.49299218e-07])) 

 

""" 

x = np.asarray(x, dtype=float) 

y = np.asarray(y, dtype=float) 

 

if axis is None: 

x = x.flatten() 

y = y.flatten() 

axis = 0 

 

# Determine shape of the result arrays 

res_shape = tuple([x.shape[ax] for ax in range(len(x.shape)) if ax != axis]) 

if not (res_shape == tuple([y.shape[ax] for ax in range(len(y.shape)) if 

ax != axis])): 

raise ValueError("Dimensions of x and y on all axes except `axis` " 

"should match") 

 

n = x.shape[axis] 

m = y.shape[axis] 

N = m + n 

if N < 3: 

raise ValueError("Not enough observations.") 

 

xy = np.concatenate((x, y), axis=axis) 

if axis != 0: 

xy = np.rollaxis(xy, axis) 

 

xy = xy.reshape(xy.shape[0], -1) 

 

# Generalized to the n-dimensional case by adding the axis argument, and 

# using for loops, since rankdata is not vectorized. For improving 

# performance consider vectorizing rankdata function. 

all_ranks = np.zeros_like(xy) 

for j in range(xy.shape[1]): 

all_ranks[:, j] = stats.rankdata(xy[:, j]) 

 

Ri = all_ranks[:n] 

M = np.sum((Ri - (N + 1.0) / 2)**2, axis=0) 

    # Mean and variance of M under the null, used for the normal approximation.

mnM = n * (N * N - 1.0) / 12 

varM = m * n * (N + 1.0) * (N + 2) * (N - 2) / 180 

z = (M - mnM) / sqrt(varM) 

 

# sf for right tail, cdf for left tail. Factor 2 for two-sidedness 

z_pos = z > 0 

pval = np.zeros_like(z) 

pval[z_pos] = 2 * distributions.norm.sf(z[z_pos]) 

pval[~z_pos] = 2 * distributions.norm.cdf(z[~z_pos]) 

 

if res_shape == (): 

# Return scalars, not 0-D arrays 

z = z[0] 

pval = pval[0] 

else: 

z.shape = res_shape 

pval.shape = res_shape 

 

return z, pval 

 

 

WilcoxonResult = namedtuple('WilcoxonResult', ('statistic', 'pvalue')) 

 

 

def wilcoxon(x, y=None, zero_method="wilcox", correction=False): 

""" 

Calculate the Wilcoxon signed-rank test. 

 

The Wilcoxon signed-rank test tests the null hypothesis that two 

related paired samples come from the same distribution. In particular, 

it tests whether the distribution of the differences x - y is symmetric 

about zero. It is a non-parametric version of the paired T-test. 

 

Parameters 

---------- 

x : array_like 

The first set of measurements. 

y : array_like, optional 

The second set of measurements. If `y` is not given, then the `x` 

array is considered to be the differences between the two sets of 

measurements. 

zero_method : string, {"pratt", "wilcox", "zsplit"}, optional 

"pratt": 

Pratt treatment: includes zero-differences in the ranking process 

(more conservative) 

"wilcox": 

Wilcox treatment: discards all zero-differences 

"zsplit": 

            Zero rank split: just like Pratt, but splitting the zero rank

between positive and negative ones 

correction : bool, optional 

If True, apply continuity correction by adjusting the Wilcoxon rank 

statistic by 0.5 towards the mean value when computing the 

z-statistic. Default is False. 

 

Returns 

------- 

statistic : float 

The sum of the ranks of the differences above or below zero, whichever 

is smaller. 

pvalue : float 

The two-sided p-value for the test. 

 

Notes 

----- 

Because the normal approximation is used for the calculations, the 

samples used should be large. A typical rule is to require that 

n > 20. 

 

References 

---------- 

.. [1] http://en.wikipedia.org/wiki/Wilcoxon_signed-rank_test 

 
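    Examples
    --------
    A minimal usage sketch for paired measurements; the data and seed are
    arbitrary choices for illustration, so the expected outputs are omitted:

    >>> from scipy import stats
    >>> np.random.seed(1234)
    >>> before = stats.norm.rvs(loc=10.0, size=30)
    >>> after = before + stats.norm.rvs(loc=0.5, size=30)
    >>> statistic, pvalue = stats.wilcoxon(before, after)
    >>> # Equivalently, pass the differences directly:
    >>> statistic, pvalue = stats.wilcoxon(before - after)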

""" 

 

if zero_method not in ["wilcox", "pratt", "zsplit"]: 

raise ValueError("Zero method should be either 'wilcox' " 

"or 'pratt' or 'zsplit'") 

 

if y is None: 

d = asarray(x) 

else: 

x, y = map(asarray, (x, y)) 

if len(x) != len(y): 

raise ValueError('Unequal N in wilcoxon. Aborting.') 

d = x - y 

 

if zero_method == "wilcox": 

# Keep all non-zero differences 

d = compress(np.not_equal(d, 0), d, axis=-1) 

 

count = len(d) 

if count < 10: 

warnings.warn("Warning: sample size too small for normal approximation.") 

 

r = stats.rankdata(abs(d)) 

r_plus = np.sum((d > 0) * r, axis=0) 

r_minus = np.sum((d < 0) * r, axis=0) 

 

if zero_method == "zsplit": 

r_zero = np.sum((d == 0) * r, axis=0) 

r_plus += r_zero / 2. 

r_minus += r_zero / 2. 

 

T = min(r_plus, r_minus) 

mn = count * (count + 1.) * 0.25 

se = count * (count + 1.) * (2. * count + 1.) 

 

if zero_method == "pratt": 

r = r[d != 0] 

 

replist, repnum = find_repeats(r) 

if repnum.size != 0: 

# Correction for repeated elements. 

se -= 0.5 * (repnum * (repnum * repnum - 1)).sum() 

 

se = sqrt(se / 24) 

correction = 0.5 * int(bool(correction)) * np.sign(T - mn) 

z = (T - mn - correction) / se 

prob = 2. * distributions.norm.sf(abs(z)) 

 

return WilcoxonResult(T, prob) 

 

 

def median_test(*args, **kwds): 

""" 

Mood's median test. 

 

Test that two or more samples come from populations with the same median. 

 

Let ``n = len(args)`` be the number of samples. The "grand median" of 

all the data is computed, and a contingency table is formed by 

classifying the values in each sample as being above or below the grand 

median. The contingency table, along with `correction` and `lambda_`, 

    is passed to `scipy.stats.chi2_contingency` to compute the test statistic

and p-value. 

 

Parameters 

---------- 

sample1, sample2, ... : array_like 

The set of samples. There must be at least two samples. 

Each sample must be a one-dimensional sequence containing at least 

one value. The samples are not required to have the same length. 

ties : str, optional 

Determines how values equal to the grand median are classified in 

the contingency table. The string must be one of:: 

 

"below": 

Values equal to the grand median are counted as "below". 

"above": 

Values equal to the grand median are counted as "above". 

"ignore": 

Values equal to the grand median are not counted. 

 

The default is "below". 

correction : bool, optional 

If True, *and* there are just two samples, apply Yates' correction 

for continuity when computing the test statistic associated with 

the contingency table. Default is True. 

lambda_ : float or str, optional. 

By default, the statistic computed in this test is Pearson's 

chi-squared statistic. `lambda_` allows a statistic from the 

Cressie-Read power divergence family to be used instead. See 

`power_divergence` for details. 

Default is 1 (Pearson's chi-squared statistic). 

nan_policy : {'propagate', 'raise', 'omit'}, optional 

        Defines how to handle the case when the input contains nan.
        'propagate' returns nan, 'raise' throws an error, and 'omit'
        performs the calculations ignoring nan values. Default is
        'propagate'.

 

Returns 

------- 

stat : float 

The test statistic. The statistic that is returned is determined by 

`lambda_`. The default is Pearson's chi-squared statistic. 

p : float 

The p-value of the test. 

m : float 

The grand median. 

table : ndarray 

The contingency table. The shape of the table is (2, n), where 

n is the number of samples. The first row holds the counts of the 

values above the grand median, and the second row holds the counts 

of the values below the grand median. The table allows further 

analysis with, for example, `scipy.stats.chi2_contingency`, or with 

`scipy.stats.fisher_exact` if there are two samples, without having 

to recompute the table. If ``nan_policy`` is "propagate" and there 

are nans in the input, the return value for ``table`` is ``None``. 

 

See Also 

-------- 

kruskal : Compute the Kruskal-Wallis H-test for independent samples. 

mannwhitneyu : Computes the Mann-Whitney rank test on samples x and y. 

 

Notes 

----- 

.. versionadded:: 0.15.0 

 

References 

---------- 

.. [1] Mood, A. M., Introduction to the Theory of Statistics. McGraw-Hill 

(1950), pp. 394-399. 

.. [2] Zar, J. H., Biostatistical Analysis, 5th ed. Prentice Hall (2010). 

See Sections 8.12 and 10.15. 

 

Examples 

-------- 

A biologist runs an experiment in which there are three groups of plants. 

Group 1 has 16 plants, group 2 has 15 plants, and group 3 has 17 plants. 

Each plant produces a number of seeds. The seed counts for each group 

are:: 

 

Group 1: 10 14 14 18 20 22 24 25 31 31 32 39 43 43 48 49 

Group 2: 28 30 31 33 34 35 36 40 44 55 57 61 91 92 99 

Group 3: 0 3 9 22 23 25 25 33 34 34 40 45 46 48 62 67 84 

 

The following code applies Mood's median test to these samples. 

 

>>> g1 = [10, 14, 14, 18, 20, 22, 24, 25, 31, 31, 32, 39, 43, 43, 48, 49] 

>>> g2 = [28, 30, 31, 33, 34, 35, 36, 40, 44, 55, 57, 61, 91, 92, 99] 

>>> g3 = [0, 3, 9, 22, 23, 25, 25, 33, 34, 34, 40, 45, 46, 48, 62, 67, 84] 

>>> from scipy.stats import median_test 

>>> stat, p, med, tbl = median_test(g1, g2, g3) 

 

The median is 

 

>>> med 

34.0 

 

and the contingency table is 

 

>>> tbl 

array([[ 5, 10, 7], 

[11, 5, 10]]) 

 

`p` is too large to conclude that the medians are not the same: 

 

>>> p 

0.12609082774093244 

 

The "G-test" can be performed by passing ``lambda_="log-likelihood"`` to 

`median_test`. 

 

>>> g, p, med, tbl = median_test(g1, g2, g3, lambda_="log-likelihood") 

>>> p 

0.12224779737117837 

 

The median occurs several times in the data, so we'll get a different 

result if, for example, ``ties="above"`` is used: 

 

>>> stat, p, med, tbl = median_test(g1, g2, g3, ties="above") 

>>> p 

0.063873276069553273 

 

>>> tbl 

array([[ 5, 11, 9], 

[11, 4, 8]]) 

 

This example demonstrates that if the data set is not large and there 

are values equal to the median, the p-value can be sensitive to the 

choice of `ties`. 

 

""" 

ties = kwds.pop('ties', 'below') 

correction = kwds.pop('correction', True) 

lambda_ = kwds.pop('lambda_', None) 

nan_policy = kwds.pop('nan_policy', 'propagate') 

 

if len(kwds) > 0: 

        bad_kwd = list(kwds.keys())[0]  # dict views are not indexable in Python 3

raise TypeError("median_test() got an unexpected keyword " 

"argument %r" % bad_kwd) 

 

if len(args) < 2: 

raise ValueError('median_test requires two or more samples.') 

 

ties_options = ['below', 'above', 'ignore'] 

if ties not in ties_options: 

raise ValueError("invalid 'ties' option '%s'; 'ties' must be one " 

"of: %s" % (ties, str(ties_options)[1:-1])) 

 

data = [np.asarray(arg) for arg in args] 

 

# Validate the sizes and shapes of the arguments. 

for k, d in enumerate(data): 

if d.size == 0: 

raise ValueError("Sample %d is empty. All samples must " 

"contain at least one value." % (k + 1)) 

if d.ndim != 1: 

raise ValueError("Sample %d has %d dimensions. All " 

"samples must be one-dimensional sequences." % 

(k + 1, d.ndim)) 

 

cdata = np.concatenate(data) 

contains_nan, nan_policy = _contains_nan(cdata, nan_policy) 

if contains_nan and nan_policy == 'propagate': 

return np.nan, np.nan, np.nan, None 

 

if contains_nan: 

grand_median = np.median(cdata[~np.isnan(cdata)]) 

else: 

grand_median = np.median(cdata) 

# When the minimum version of numpy supported by scipy is 1.9.0, 

# the above if/else statement can be replaced by the single line: 

# grand_median = np.nanmedian(cdata) 

 

# Create the contingency table. 

table = np.zeros((2, len(data)), dtype=np.int64) 

for k, sample in enumerate(data): 

sample = sample[~np.isnan(sample)] 

 

nabove = count_nonzero(sample > grand_median) 

nbelow = count_nonzero(sample < grand_median) 

nequal = sample.size - (nabove + nbelow) 

table[0, k] += nabove 

table[1, k] += nbelow 

if ties == "below": 

table[1, k] += nequal 

elif ties == "above": 

table[0, k] += nequal 

 

# Check that no row or column of the table is all zero. 

# Such a table can not be given to chi2_contingency, because it would have 

# a zero in the table of expected frequencies. 

rowsums = table.sum(axis=1) 

if rowsums[0] == 0: 

raise ValueError("All values are below the grand median (%r)." % 

grand_median) 

if rowsums[1] == 0: 

raise ValueError("All values are above the grand median (%r)." % 

grand_median) 

if ties == "ignore": 

# We already checked that each sample has at least one value, but it 

# is possible that all those values equal the grand median. If `ties` 

# is "ignore", that would result in a column of zeros in `table`. We 

# check for that case here. 

zero_cols = np.where((table == 0).all(axis=0))[0] 

if len(zero_cols) > 0: 

msg = ("All values in sample %d are equal to the grand " 

"median (%r), so they are ignored, resulting in an " 

"empty sample." % (zero_cols[0] + 1, grand_median)) 

raise ValueError(msg) 

 

stat, p, dof, expected = chi2_contingency(table, lambda_=lambda_, 

correction=correction) 

return stat, p, grand_median, table 

 

 

def _circfuncs_common(samples, high, low): 
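    # Shared preprocessing for the circular statistics below: map `samples`
    # from the interval [low, high] onto angles in radians in [0, 2*pi].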

samples = np.asarray(samples) 

if samples.size == 0: 

return np.nan, np.nan 

 

ang = (samples - low)*2.*pi / (high - low) 

return samples, ang 

 

 

def circmean(samples, high=2*pi, low=0, axis=None): 

""" 

Compute the circular mean for samples in a range. 

 

Parameters 

---------- 

samples : array_like 

Input array. 

high : float or int, optional 

High boundary for circular mean range. Default is ``2*pi``. 

low : float or int, optional 

Low boundary for circular mean range. Default is 0. 

axis : int, optional 

Axis along which means are computed. The default is to compute 

the mean of the flattened array. 

 

Returns 

------- 

circmean : float 

Circular mean. 

 

Examples 

-------- 

>>> from scipy.stats import circmean 

>>> circmean([0.1, 2*np.pi+0.2, 6*np.pi+0.3]) 

0.2 

 

>>> from scipy.stats import circmean 

>>> circmean([0.2, 1.4, 2.6], high = 1, low = 0) 

0.4 

 

""" 

samples, ang = _circfuncs_common(samples, high, low) 

S = sin(ang).sum(axis=axis) 

C = cos(ang).sum(axis=axis) 

res = arctan2(S, C) 

mask = res < 0 

if mask.ndim > 0: 

res[mask] += 2*pi 

elif mask: 

res += 2*pi 

return res*(high - low)/2.0/pi + low 

 

 

def circvar(samples, high=2*pi, low=0, axis=None): 

""" 

Compute the circular variance for samples assumed to be in a range 

 

Parameters 

---------- 

samples : array_like 

Input array. 

low : float or int, optional 

Low boundary for circular variance range. Default is 0. 

high : float or int, optional 

High boundary for circular variance range. Default is ``2*pi``. 

axis : int, optional 

Axis along which variances are computed. The default is to compute 

the variance of the flattened array. 

 

Returns 

------- 

circvar : float 

Circular variance. 

 

Notes 

----- 

This uses a definition of circular variance that in the limit of small 

angles returns a number close to the 'linear' variance. 

 

Examples 

-------- 

>>> from scipy.stats import circvar 

>>> circvar([0, 2*np.pi/3, 5*np.pi/3]) 

2.19722457734 

 

""" 

samples, ang = _circfuncs_common(samples, high, low) 

S = sin(ang).mean(axis=axis) 

C = cos(ang).mean(axis=axis) 

R = hypot(S, C) 

return ((high - low)/2.0/pi)**2 * 2 * log(1/R) 

 

 

def circstd(samples, high=2*pi, low=0, axis=None): 

""" 

Compute the circular standard deviation for samples assumed to be in the 

range [low to high]. 

 

Parameters 

---------- 

samples : array_like 

Input array. 

low : float or int, optional 

Low boundary for circular standard deviation range. Default is 0. 

high : float or int, optional 

High boundary for circular standard deviation range. 

Default is ``2*pi``. 

axis : int, optional 

Axis along which standard deviations are computed. The default is 

to compute the standard deviation of the flattened array. 

 

Returns 

------- 

circstd : float 

Circular standard deviation. 

 

Notes 

----- 

This uses a definition of circular standard deviation that in the limit of 

small angles returns a number close to the 'linear' standard deviation. 

 

Examples 

-------- 

>>> from scipy.stats import circstd 

>>> circstd([0, 0.1*np.pi/2, 0.001*np.pi, 0.03*np.pi/2]) 

0.063564063306 

 

""" 

samples, ang = _circfuncs_common(samples, high, low) 

S = sin(ang).mean(axis=axis) 

C = cos(ang).mean(axis=axis) 

R = hypot(S, C) 

return ((high - low)/2.0/pi) * sqrt(-2*log(R))